hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4c14dac70f7ab53364c7e63f8afba6d9c98097a8 | 6,263 | py | Python | tests/unit/__init__.py | HumanCellAtlas/matrix-service | ed9a23d407cce89127b8f0b662c3d2ef2e8fec77 | [
"MIT"
] | 11 | 2018-10-26T20:47:55.000Z | 2022-02-02T10:32:42.000Z | tests/unit/__init__.py | HumanCellAtlas/matrix-service | ed9a23d407cce89127b8f0b662c3d2ef2e8fec77 | [
"MIT"
] | 379 | 2018-06-04T22:44:33.000Z | 2020-06-03T00:20:08.000Z | tests/unit/__init__.py | HumanCellAtlas/matrix-service | ed9a23d407cce89127b8f0b662c3d2ef2e8fec77 | [
"MIT"
] | 4 | 2018-11-22T01:00:27.000Z | 2020-09-01T16:42:05.000Z | import os
import unittest
import boto3
from moto import mock_dynamodb2, mock_s3, mock_sqs, mock_sts, mock_secretsmanager
os.environ['DEPLOYMENT_STAGE'] = "test_deployment_stage"
os.environ['AWS_DEFAULT_REGION'] = "us-east-1"
os.environ['AWS_ACCESS_KEY_ID'] = "test_ak"
os.environ['AWS_SECRET_ACCESS_KEY'] = "test_sk"
os.environ['LAMBDA_DRIVER_V0_FUNCTION_NAME'] = "test_driver_v0_name"
os.environ['LAMBDA_DRIVER_V1_FUNCTION_NAME'] = "test_driver_v1_name"
os.environ['LAMBDA_NOTIFICATION_FUNCTION_NAME'] = "test_notification_name"
os.environ['DYNAMO_DATA_VERSION_TABLE_NAME'] = "test_data_version_table_name"
os.environ['DYNAMO_DEPLOYMENT_TABLE_NAME'] = "test_deployment_table_name"
os.environ['DYNAMO_REQUEST_TABLE_NAME'] = "test_request_table_name"
os.environ['MATRIX_RESULTS_BUCKET'] = "test_results_bucket"
os.environ['MATRIX_QUERY_BUCKET'] = "test_query_bucket"
os.environ['MATRIX_QUERY_RESULTS_BUCKET'] = "test_query_results_bucket"
os.environ['MATRIX_PRELOAD_BUCKET'] = "test_preload_bucket"
os.environ['MATRIX_REDSHIFT_IAM_ROLE_ARN'] = "test_redshift_role"
os.environ['BATCH_CONVERTER_JOB_QUEUE_ARN'] = "test-job-queue"
os.environ['BATCH_CONVERTER_JOB_DEFINITION_ARN'] = "test-job-definition"
# must be imported after test environment variables are set
from matrix.common.aws.dynamo_handler import DataVersionTableField, DeploymentTableField # noqa
from matrix.common.config import MatrixInfraConfig, MatrixRedshiftConfig # noqa
class MatrixTestCaseUsingMockAWS(unittest.TestCase):
    """Base TestCase that stands up moto-mocked AWS services (DynamoDB, S3,
    SQS, STS, Secrets Manager) plus helpers to create and seed the tables
    and buckets the matrix service expects.
    """

    TEST_CONFIG = {
        'query_job_q_url': 'test_query_job_q_name',
        'query_job_deadletter_q_url': 'test_deadletter_query_job_q_name',
        'notification_q_url': 'test_notification_q_url'
    }

    TEST_REDSHIFT_CONFIG = {
        'database_uri': 'test_database_uri',
        'redshift_role_arn': 'test_redshift_role_arn'
    }

    def setUp(self):
        # Start every moto mock; each is kept on self so tearDown can stop it.
        self.dynamo_mock = mock_dynamodb2()
        self.dynamo_mock.start()
        self.s3_mock = mock_s3()
        self.s3_mock.start()
        self.secrets_mock = mock_secretsmanager()
        self.secrets_mock.start()
        self.sqs_mock = mock_sqs()
        self.sqs_mock.start()
        self.sts_mock = mock_sts()
        self.sts_mock.start()

        self.matrix_infra_config = MatrixInfraConfig()
        self.redshift_config = MatrixRedshiftConfig()

        self.sqs = boto3.resource('sqs')
        # Queue names are constants (the original used f-strings with no
        # placeholders); they must match TEST_CONFIG above.
        self.sqs.create_queue(QueueName="test_query_job_q_name")
        self.sqs.create_queue(QueueName="test_deadletter_query_job_q_name")
        self.sqs.create_queue(QueueName="test_notification_q_url")

    def tearDown(self):
        # Bug fix: previously only the dynamo and s3 mocks were stopped,
        # leaking the secretsmanager/sqs/sts botocore patches into any
        # tests that run afterwards.
        self.dynamo_mock.stop()
        self.s3_mock.stop()
        self.secrets_mock.stop()
        self.sqs_mock.stop()
        self.sts_mock.stop()

    @staticmethod
    def _create_dynamo_table(table_name, hash_key, attribute_type):
        """Create a minimal DynamoDB table with a single HASH key.

        :param table_name: name of the table to create.
        :param hash_key: attribute name used as the partition key.
        :param attribute_type: DynamoDB attribute type ("S" or "N").
        """
        boto3.resource("dynamodb", region_name=os.environ['AWS_DEFAULT_REGION']).create_table(
            TableName=table_name,
            KeySchema=[
                {
                    'AttributeName': hash_key,
                    'KeyType': "HASH",
                }
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': hash_key,
                    'AttributeType': attribute_type,
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 25,
                'WriteCapacityUnits': 25,
            },
        )

    @staticmethod
    def create_test_data_version_table():
        """Create the data version table keyed by numeric DataVersion."""
        MatrixTestCaseUsingMockAWS._create_dynamo_table(
            os.environ['DYNAMO_DATA_VERSION_TABLE_NAME'], "DataVersion", "N")

    @staticmethod
    def create_test_deployment_table():
        """Create the deployment table keyed by string Deployment."""
        MatrixTestCaseUsingMockAWS._create_dynamo_table(
            os.environ['DYNAMO_DEPLOYMENT_TABLE_NAME'], "Deployment", "S")

    @staticmethod
    def create_test_request_table():
        """Create the request table keyed by string RequestId."""
        MatrixTestCaseUsingMockAWS._create_dynamo_table(
            os.environ['DYNAMO_REQUEST_TABLE_NAME'], "RequestId", "S")

    @staticmethod
    def init_test_data_version_table():
        """Seed the data version table with a single version-0 row."""
        dynamo = boto3.resource("dynamodb", region_name=os.environ['AWS_DEFAULT_REGION'])
        data_version_table = dynamo.Table(os.environ['DYNAMO_DATA_VERSION_TABLE_NAME'])
        data_version_table.put_item(
            Item={
                DataVersionTableField.DATA_VERSION.value: 0,
                DataVersionTableField.CREATION_DATE.value: "test_date",
                DataVersionTableField.PROJECT_CELL_COUNTS.value: {'test_project': 1},
                DataVersionTableField.METADATA_SCHEMA_VERSIONS.value: {},
            }
        )

    @staticmethod
    def init_test_deployment_table():
        """Point the current deployment stage at data version 0."""
        dynamo = boto3.resource("dynamodb", region_name=os.environ['AWS_DEFAULT_REGION'])
        deployment_table = dynamo.Table(os.environ['DYNAMO_DEPLOYMENT_TABLE_NAME'])
        deployment_table.put_item(
            Item={
                DeploymentTableField.DEPLOYMENT.value: os.environ['DEPLOYMENT_STAGE'],
                DeploymentTableField.CURRENT_DATA_VERSION.value: 0
            }
        )

    @staticmethod
    def create_s3_results_bucket():
        """Create the mocked results bucket."""
        boto3.resource("s3", region_name=os.environ['AWS_DEFAULT_REGION']) \
            .create_bucket(Bucket=os.environ['MATRIX_RESULTS_BUCKET'])

    @staticmethod
    def create_s3_queries_bucket():
        """Create the mocked query bucket."""
        boto3.resource("s3", region_name=os.environ['AWS_DEFAULT_REGION']) \
            .create_bucket(Bucket=os.environ['MATRIX_QUERY_BUCKET'])
| 37.957576 | 96 | 0.631327 |
68d2f48de4ec802a711902993b2f426d05a46f6a | 3,838 | py | Python | tests/test_databaselayer_bulk_hash.py | aroiginfraplan/giscube-admin | b7f3131b0186f847f3902df97f982cb288b16a49 | [
"BSD-3-Clause"
] | 5 | 2018-06-07T12:54:35.000Z | 2022-01-14T10:38:38.000Z | tests/test_databaselayer_bulk_hash.py | aroiginfraplan/giscube-admin | b7f3131b0186f847f3902df97f982cb288b16a49 | [
"BSD-3-Clause"
] | 140 | 2018-06-18T10:27:28.000Z | 2022-03-23T09:53:15.000Z | tests/test_databaselayer_bulk_hash.py | aroiginfraplan/giscube-admin | b7f3131b0186f847f3902df97f982cb288b16a49 | [
"BSD-3-Clause"
] | 1 | 2021-04-13T11:20:54.000Z | 2021-04-13T11:20:54.000Z | from unittest import mock
from django.conf import settings
from django.urls import reverse
from django.utils import timezone
from giscube.models import DBConnection, GiscubeTransaction
from layerserver.model_legacy import create_dblayer_model
from layerserver.models import DataBaseLayer
from tests.common import BaseTest
class DataBaseLayerBulkHashAPITestCase(BaseTest):
    """Tests that the bulk-content endpoint deduplicates requests by hash:
    replaying an identical payload returns the recorded transaction instead
    of applying the changes twice, and failed attempts are logged per try.
    """

    def setUp(self):
        # Bug fix: super(self.__class__, self) recurses infinitely if this
        # class is ever subclassed; the zero-argument form is safe.
        super().setUp()
        conn = DBConnection()
        conn.alias = 'test_connection'
        conn.engine = settings.DATABASES['default']['ENGINE']
        conn.name = settings.DATABASES['default']['NAME']
        conn.user = settings.DATABASES['default']['USER']
        conn.password = settings.DATABASES['default']['PASSWORD']
        conn.host = settings.DATABASES['default']['HOST']
        conn.port = settings.DATABASES['default']['PORT']
        conn.save()

        layer = DataBaseLayer()
        layer.db_connection = conn
        layer.name = 'tests_location_25831'
        layer.table = 'tests_location_25831'
        layer.srid = 25831
        layer.pk_field = 'code'
        layer.geom_field = 'geometry'
        # Anonymous CRUD is enabled so the tests need no authentication.
        layer.anonymous_view = True
        layer.anonymous_add = True
        layer.anonymous_update = True
        layer.anonymous_delete = True
        layer.save()
        self.layer = layer

        self.locations = []
        Location = create_dblayer_model(layer)
        self.Location = Location
        location = Location()
        location.code = 'C001'
        location.address = 'C/ Major 1, Salt'
        location.geometry = 'POINT(482984.669856201 4647181.21886241)'
        location.save()
        self.locations.append(location)

    def test_bulk_hash(self):
        """Posting the same payload twice yields one transaction and the
        cached response on the replay."""
        data = {
            'ADD': [
                {
                    'code': 'C003',
                    'address': 'C/ Major 3, Salt',
                    'geometry': 'POINT (2.79450 41.97642)'
                }
            ],
            'UPDATE': [],
            'DELETE': [],
            '_META': {'time': timezone.now().isoformat()}
        }
        url = reverse('content-bulk', kwargs={'name': self.layer.name})
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200)
        hash = response.request['HTTP_X_BULK_HASH']
        self.assertTrue(GiscubeTransaction.objects.filter(hash=hash).exists())
        response1 = response
        # Replay: same payload must return the identical cached response.
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response1.json(), response.json())
        self.assertEqual(GiscubeTransaction.objects.all().count(), 1)

    @mock.patch('layerserver.api.DBLayerContentBulkViewSet.post')
    def test_bulk_hash_post_error(self, fake_post):
        """Failed requests are recorded once per attempt (no dedup) with the
        error message and a 500 status."""
        fake_post.side_effect = Exception('Test')
        data = {
            'ADD': [
                {
                    'code': 'C003',
                    'address': 'C/ Major 3, Salt',
                    'geometry': 'POINT (2.79450 41.97642)'
                }
            ],
            'UPDATE': [],
            'DELETE': [],
            '_META': {'time': timezone.now().isoformat()}
        }
        url = reverse('content-bulk', kwargs={'name': self.layer.name})
        try:
            self.client.post(url, data)
        except Exception:
            pass
        try:
            self.client.post(url, data)
        except Exception:
            pass
        _, hash = self.client.bulk_hash(data)
        filter = {'hash': hash, 'response_status_code': 500}
        transactions = GiscubeTransaction.objects.filter(**filter)
        self.assertEqual(transactions.count(), 2)
        first = transactions.first()
        last = transactions.last()
        self.assertEqual(first.hash, last.hash)
        self.assertEqual(transactions.first().error, 'Test')
| 33.373913 | 78 | 0.583898 |
2a8bda0558d69dc0ca75e8b9e4834c31110c89fe | 2,300 | py | Python | src/modules/voice_roles.py | ProffDea/DiscordBot | 50948503a3b376c6a01ce0630bbf5684d28d08aa | [
"MIT"
] | null | null | null | src/modules/voice_roles.py | ProffDea/DiscordBot | 50948503a3b376c6a01ce0630bbf5684d28d08aa | [
"MIT"
] | null | null | null | src/modules/voice_roles.py | ProffDea/DiscordBot | 50948503a3b376c6a01ce0630bbf5684d28d08aa | [
"MIT"
] | null | null | null | import discord
from sqlalchemy.ext.asyncio import AsyncSession
from src.postgres import database
async def get_role(session: AsyncSession, voice_channel: discord.VoiceChannel):
    """Resolve the role associated with a voice channel.

    Looks up a channel-local role first, then falls back to the guild's
    global voice role. Returns the ``discord.Role`` (or ``None`` when the
    role id is unknown to the guild, or when neither mapping exists).
    """
    # Channel-local mapping takes precedence over the guild-wide one.
    if await database.voice_local_role_exists(session, voice_channel.id):
        role_id = await database.get_voice_local_role(
            session,
            voice_channel.id
        )
        return voice_channel.guild.get_role(role_id)
    # No local role configured: fall back to the guild-global role, if any.
    # (The original `elif not voice_local_role_exist` was always true here.)
    if await database.voice_global_role_exists(session, voice_channel.guild.id):
        role_id = await database.get_voice_global_role(
            session,
            voice_channel.guild.id
        )
        return voice_channel.guild.get_role(role_id)
    return None
async def check(
    session: AsyncSession,
    member: discord.Member,
    before: discord.VoiceState,
    after: discord.VoiceState
):
    """Synchronize a member's voice role after a voice-state change.

    Removes the channel role when the member leaves (or is parked in the
    AFK channel) and grants it on join; on a move between channels the old
    channel's role is swapped for the new one when they differ.
    """
    if (
        before.channel and not after.channel or
        after.channel and member.guild.afk_channel and
        after.channel == member.guild.afk_channel
    ): # Leaving channel or joining afk channel
        if (
            after.channel and member.guild.afk_channel and
            after.channel.id == member.guild.afk_channel.id
        ):
            # Moving into the AFK channel: treat the AFK channel itself as
            # the channel being "left" so its role (if any) is removed.
            before = after
        role = await get_role(session, before.channel)
        if role and role in member.roles:
            await member.remove_roles(role)
    elif (
        not before.channel and after.channel or
        before.channel and after.channel and
        before.channel != after.channel
    ): # Joining channel or moving between channels
        role_after = await get_role(session, after.channel)
        if role_after and role_after not in member.roles:
            await member.add_roles(role_after)
        if before.channel and after.channel:
            # Moving between channels: drop the previous channel's role
            # unless both channels resolve to the same role.
            role_before = await get_role(session, before.channel)
            if (
                role_before and role_before != role_after
                and role_before in member.roles
            ):
                await member.remove_roles(role_before)
| 33.823529 | 79 | 0.636522 |
385c8df878a39027c74faa0185bb510f419016c2 | 1,925 | py | Python | evaluation/scripts/fix_bleu.py | zpapakipos/dynabench-1 | 95884b4e29c57263dc1a85909be979c084d5fac3 | [
"MIT"
] | 15 | 2021-09-24T00:46:04.000Z | 2022-03-16T13:24:56.000Z | evaluation/scripts/fix_bleu.py | zpapakipos/dynabench-1 | 95884b4e29c57263dc1a85909be979c084d5fac3 | [
"MIT"
] | 98 | 2021-09-22T12:33:21.000Z | 2022-03-21T22:23:52.000Z | evaluation/scripts/fix_bleu.py | zpapakipos/dynabench-1 | 95884b4e29c57263dc1a85909be979c084d5fac3 | [
"MIT"
] | 12 | 2021-09-25T05:08:18.000Z | 2022-02-28T21:02:20.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# isort:skip_file
import functools
import json
import logging
import sys
import func_argparse
sys.path.append("..") # noqa
sys.path.append("../../api") # noqa
from datasets.mt import flores # isort:skip
from models.dataset import Dataset, DatasetModel # isort:skip
from models.score import ScoreModel # isort:skip
from models.task import TaskModel # isort:skip
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("fix_bleu")
@functools.lru_cache()
def _get_dataset_name(dm: DatasetModel, did: int) -> str:
    # Cached lookup of a dataset's name by id (avoids a DB round-trip per
    # score). NOTE(review): lru_cache keys on `dm` as well and keeps it
    # alive for the process lifetime — acceptable in this one-shot script.
    return dm.dbs.query(Dataset.name).filter(Dataset.id == did).one()[0]
def main(task_code: str = "flores_small1", writeback: bool = False):
    """Fix the BLEU score in the current DB.

    Dry-run by default, pass --writeback to actually write to the DB
    Note that this modification is idempotent, it's safe to run it several time.
    We are only modifying the overall "perf" field, not the per language scores.
    """
    sm = ScoreModel()
    dm = DatasetModel()
    tid = TaskModel().getByTaskCode(task_code).id
    perf_metric = "sp_bleu"
    for score in sm.getByTid(tid):
        # Scores without metadata have no per-tag breakdown to recompute from.
        if not score.metadata_json:
            continue
        metadata_json = json.loads(score.metadata_json)
        # Recompute the overall score by re-averaging the per-language scores.
        fixed = flores.compute_averages(perf_metric, metadata_json["perf_by_tag"])
        old_bleu = score.perf
        new_bleu = fixed["perf"]
        if writeback:
            sm.update(score.id, perf=fixed["perf"], pretty_perf=fixed["pretty_perf"])
        # Logged in both dry-run and writeback mode so the diff is visible.
        logger.info(
            f"Fixed score for model '{score.model.name}' ({score.mid}) on dataset "
            + f"'{_get_dataset_name(dm, score.did)}': {old_bleu} -> {new_bleu}"
        )
if __name__ == "__main__":
    # func_argparse builds the CLI (--task_code/--writeback) from main()'s signature.
    func_argparse.single_main(main)
| 32.627119 | 85 | 0.688312 |
dd6124e8f0b5401bce778ecc200d95282b456843 | 4,849 | py | Python | python_data/fintech/common_utils.py | younhapan/ystdoc | a3fee3c48fc4e35b26b70ab7d9f123be059a4a7a | [
"Apache-2.0"
] | null | null | null | python_data/fintech/common_utils.py | younhapan/ystdoc | a3fee3c48fc4e35b26b70ab7d9f123be059a4a7a | [
"Apache-2.0"
] | null | null | null | python_data/fintech/common_utils.py | younhapan/ystdoc | a3fee3c48fc4e35b26b70ab7d9f123be059a4a7a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import time
import datetime
import hashlib
import traceback
import math
import numpy
class CommonUtils(object) :
    """Grab-bag of helpers: date parsing, list statistics, hashing, and a
    few domain-specific utilities (line fitting, sigmoid scoring, overdue
    days). The list_* helpers return the sentinel string 'nan' for empty
    input and 'err' when the computation raises.
    """

    def __init__(self) :
        pass

    def str_to_datetime(self,str_time) :
        """
        Convert a 'YY-mm-dd HH:MM:SS' style string into a datetime.
        """
        return datetime.datetime.strptime(str(str_time),'%Y-%m-%d %H:%M:%S')

    def str_to_time(self,str_time):
        """
        Convert a 'YY-mm-dd HH:MM:SS' style string into a unix timestamp.
        """
        # Only the first 19 characters are parsed, so fractional seconds
        # or trailing timezone text are silently ignored.
        return time.mktime(time.strptime(str(str_time)[:19],'%Y-%m-%d %H:%M:%S'))

    def list_excavate(self,_list) :
        # Bundle all of the list statistics below into a single dict.
        return {
            'len':self.list_len(_list),
            'sum':self.list_sum(_list),
            'ave':self.list_ave(_list),
            'max':self.list_max(_list),
            'min':self.list_min(_list),
            'median':self.list_median(_list),
            'variance':self.list_variance(_list),
            'standard':self.list_standard(_list),
        }

    def matchming_number(self, matchming_number_list, ii) :
        """
        Fit a straight line (degree-1 polynomial) through the data in
        matchming_number_list and return the interpolated y value at x = ii.
        matchming_number_list is a list of (x, y) tuples.
        """
        matchming_number_list = sorted(matchming_number_list,key=lambda matchming_number:matchming_number[0])
        X = []
        Y = []
        for (a,b) in matchming_number_list :
            X.append(a)
            Y.append(b)
        zx = numpy.polyfit(X, Y, 1)
        func = numpy.poly1d(zx)
        return func(ii)

    def list_len(self,_list) :
        """
        Length of the list ('nan' when empty or not a list).
        """
        if type(_list) != list or len(_list) == 0:
            return 'nan'
        return int(len(_list))

    def list_sum(self,_list) :
        """
        Sum of the list.
        """
        try :
            return sum(_list) if len(_list) != 0 else 'nan'
        except :
            return 'err'

    def list_ave(self,_list) :
        """
        Mean of the list.
        """
        try :
            return sum(_list) * 1.0 / len(_list) if len(_list) != 0 else 'nan'
        except :
            return 'err'

    def list_max(self,_list) :
        """
        Maximum of the list.
        """
        try :
            return max(_list) if len(_list) != 0 else 'nan'
        except :
            return 'err'

    def list_min(self,_list) :
        """
        Minimum of the list.
        """
        try :
            return min(_list) if len(_list) != 0 else 'nan'
        except :
            return 'err'

    def list_median(self,_list) :
        """
        Median of the list.
        """
        try :
            if len(_list) == 0 : return 'nan'
            _list = sorted(_list)
            if len(_list) % 2 == 1 :
                return _list[len(_list)//2]
            else :
                # Even length: average the two middle elements.
                return ( _list[len(_list)//2-1] + _list[len(_list)//2] ) * 1.0 / 2
        except :
            return 'err'

    def list_variance(self,_list) :
        """
        Population variance of the list.
        """
        try :
            if len(_list) == 0 : return 'nan'
            ex=float(sum(_list))/len(_list)
            s=0
            for i in _list:
                s+=(i-ex)**2
            return float(s)/len(_list)
        except :
            return 'err'

    def list_standard(self,_list) :
        """
        Population standard deviation of the list.
        """
        try :
            if len(_list) == 0 : return 'nan'
            ex=float(sum(_list))/len(_list);
            s=0;
            for i in _list:
                s+=(i-ex)**2;
            return math.sqrt(float(s)/len(_list));
        except :
            return 'err'

    def md5(self,_str):
        """
        Return the hex MD5 digest of _str.
        """
        # NOTE(review): under Python 3, hashlib requires bytes here — a str
        # argument raises TypeError. Presumably callers pass bytes; verify.
        m = hashlib.md5()
        m.update(_str)
        return m.hexdigest()

    def sigmoid(self,w1,w2) :
        """
        Logistic (sigmoid) of the sum of two weights: 1 / (1 + e^-(w1+w2)).
        """
        def get_prob_by_weight(w1,w2) :
            new_weight = w1 + w2
            new_prob = 1.0 / (1 + math.e**(-1.0 * new_weight))
            return new_prob
        return get_prob_by_weight(float(w1),float(w2))

    def overdue_days(self,auto_repay_time,finish_repay_time):
        """
        Compute overdue days from auto_repay_time (due date) and
        finish_repay_time (actual repayment time). Copied from hydra.
        0: not overdue
        -2: not yet disbursed (due date still in the future)
        -1: not yet due
        """
        # NOTE(review): the hours=-8 shift presumably converts an implicit
        # UTC "now" to the business timezone (UTC+8) — confirm upstream.
        if auto_repay_time:
            if auto_repay_time and auto_repay_time > datetime.datetime.now()+datetime.timedelta(hours=-8):
                return -2
        dt = finish_repay_time if finish_repay_time else datetime.datetime.now()+datetime.timedelta(hours=-8)
        deti = dt - auto_repay_time
        days = deti.days
        seconds = deti.seconds
        if days < 0:
            num = 0
        elif days == 0:
            # Same-day: repayments within the first 12 hours are not overdue.
            if seconds < 12 * 60 * 60:
                num = 0
            else:
                num = 1
        elif days > 0:
            num = days + 1
        else:
            num = -1
        return num
3aae8c8694886233ed379a2e34713859bc724086 | 7,634 | py | Python | pypy/module/_cffi_backend/test/test_re_python.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | pypy/module/_cffi_backend/test/test_re_python.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | pypy/module/_cffi_backend/test/test_re_python.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 30 | 2018-08-20T03:16:34.000Z | 2022-01-12T17:39:22.000Z | import py
from rpython.tool.udir import udir
from pypy.interpreter.gateway import interp2app
from pypy.module._cffi_backend.newtype import _clean_cache
class AppTestRecompilerPython:
    """App-level tests for cffi's out-of-line "re_python" mode as seen
    through PyPy's _cffi_backend.

    setup_class compiles a small C extension with the *host* cffi
    (>= 1.0.0) and emits the out-of-line Python source; the individual
    tests then dlopen the compiled module via the emitted ffi object.
    """
    spaceconfig = dict(usemodules=['_cffi_backend'])

    def setup_class(cls):
        try:
            from cffi import FFI  # <== the system one, which
            from cffi import recompiler  # needs to be at least cffi 1.0.0
            from cffi import ffiplatform
        except ImportError:
            py.test.skip("system cffi module not found or older than 1.0.0")
        space = cls.space
        SRC = """
        #define FOOBAR (-42)
        static const int FOOBAZ = -43;
        #define BIGPOS 420000000000L
        #define BIGNEG -420000000000L
        int add42(int x) { return x + 42; }
        int globalvar42 = 1234;
        const int globalconst42 = 4321;
        const char *const globalconsthello = "hello";
        struct foo_s;
        typedef struct bar_s { int x; signed char a[]; } bar_t;
        enum foo_e { AA, BB, CC };
        void init_test_re_python(void) { } /* windows hack */
        void PyInit__test_re_python(void) { } /* windows hack */
        """
        tmpdir = udir.join('test_re_python')
        tmpdir.ensure(dir=1)
        c_file = tmpdir.join('_test_re_python.c')
        c_file.write(SRC)
        # Build the real shared library the tests will dlopen.
        ext = ffiplatform.get_extension(str(c_file), '_test_re_python',
            export_symbols=['add42', 'globalvar42',
                            'globalconst42', 'globalconsthello'])
        outputfilename = ffiplatform.compile(str(tmpdir), ext)
        cls.w_extmod = space.wrap(outputfilename)
        #mod.tmpdir = tmpdir
        #
        # Emit the out-of-line python source describing the same API
        # (plus deliberately-missing symbols for the error-path tests).
        ffi = FFI()
        ffi.cdef("""
        #define FOOBAR -42
        static const int FOOBAZ = -43;
        #define BIGPOS 420000000000L
        #define BIGNEG -420000000000L
        int add42(int);
        int globalvar42;
        const int globalconst42;
        const char *const globalconsthello = "hello";
        int no_such_function(int);
        int no_such_globalvar;
        struct foo_s;
        typedef struct bar_s { int x; signed char a[]; } bar_t;
        enum foo_e { AA, BB, CC };
        """)
        ffi.set_source('re_python_pysrc', None)
        ffi.emit_python_code(str(tmpdir.join('re_python_pysrc.py')))
        #
        # A second ffi that includes the first, for the ffi.include() tests.
        sub_ffi = FFI()
        sub_ffi.cdef("static const int k2 = 121212;")
        sub_ffi.include(ffi)
        assert 'macro FOOBAR' in ffi._parser._declarations
        assert 'macro FOOBAZ' in ffi._parser._declarations
        sub_ffi.set_source('re_py_subsrc', None)
        sub_ffi.emit_python_code(str(tmpdir.join('re_py_subsrc.py')))
        #
        # App-level helper that puts tmpdir on sys.path so the emitted
        # modules are importable from inside the tests.
        cls.w_fix_path = space.appexec([space.wrap(str(tmpdir))], """(path):
            def fix_path(ignored=None):
                import _cffi_backend # force it to be initialized
                import sys
                if path not in sys.path:
                    sys.path.insert(0, path)
            return fix_path
        """)

    def teardown_method(self, meth):
        # Drop the emitted modules and clear the ctype cache between tests.
        self.space.appexec([], """():
            import sys
            for name in ['re_py_subsrc', 're_python_pysrc']:
                if name in sys.modules:
                    del sys.modules[name]
        """)
        _clean_cache(self.space)

    def test_constant_1(self):
        self.fix_path()
        from re_python_pysrc import ffi
        assert ffi.integer_const('FOOBAR') == -42
        assert ffi.integer_const('FOOBAZ') == -43

    def test_large_constant(self):
        self.fix_path()
        from re_python_pysrc import ffi
        assert ffi.integer_const('BIGPOS') == 420000000000
        assert ffi.integer_const('BIGNEG') == -420000000000

    def test_function(self):
        import _cffi_backend
        self.fix_path()
        from re_python_pysrc import ffi
        lib = ffi.dlopen(self.extmod)
        assert lib.add42(-10) == 32
        assert type(lib.add42) is _cffi_backend.FFI.CData

    def test_dlclose(self):
        import _cffi_backend
        self.fix_path()
        from re_python_pysrc import ffi
        lib = ffi.dlopen(self.extmod)
        ffi.dlclose(lib)
        # Accessing a symbol after dlclose must raise, with a clear message.
        e = raises(ffi.error, getattr, lib, 'add42')
        assert str(e.value) == (
            "library '%s' has been closed" % (self.extmod,))
        ffi.dlclose(lib)   # does not raise

    def test_constant_via_lib(self):
        self.fix_path()
        from re_python_pysrc import ffi
        lib = ffi.dlopen(self.extmod)
        assert lib.FOOBAR == -42
        assert lib.FOOBAZ == -43

    def test_opaque_struct(self):
        self.fix_path()
        from re_python_pysrc import ffi
        # An opaque struct can be cast to, but not instantiated.
        ffi.cast("struct foo_s *", 0)
        raises(TypeError, ffi.new, "struct foo_s *")

    def test_nonopaque_struct(self):
        self.fix_path()
        from re_python_pysrc import ffi
        for p in [ffi.new("struct bar_s *", [5, b"foobar"]),
                  ffi.new("bar_t *", [5, b"foobar"])]:
            assert p.x == 5
            assert p.a[0] == ord('f')
            assert p.a[5] == ord('r')

    def test_enum(self):
        self.fix_path()
        from re_python_pysrc import ffi
        assert ffi.integer_const("BB") == 1
        e = ffi.cast("enum foo_e", 2)
        assert ffi.string(e) == "CC"

    def test_include_1(self):
        self.fix_path()
        from re_py_subsrc import ffi
        # Constants from both the sub-ffi and the included ffi are visible.
        assert ffi.integer_const('FOOBAR') == -42
        assert ffi.integer_const('FOOBAZ') == -43
        assert ffi.integer_const('k2') == 121212
        lib = ffi.dlopen(self.extmod)     # <- a random unrelated library would be fine
        assert lib.FOOBAR == -42
        assert lib.FOOBAZ == -43
        assert lib.k2 == 121212
        #
        p = ffi.new("bar_t *", [5, b"foobar"])
        assert p.a[4] == ord('a')

    def test_global_var(self):
        self.fix_path()
        from re_python_pysrc import ffi
        lib = ffi.dlopen(self.extmod)
        assert lib.globalvar42 == 1234
        # Writes through the lib attribute and the raw pointer must agree.
        p = ffi.addressof(lib, 'globalvar42')
        lib.globalvar42 += 5
        assert p[0] == 1239
        p[0] -= 1
        assert lib.globalvar42 == 1238

    def test_global_const_int(self):
        self.fix_path()
        from re_python_pysrc import ffi
        lib = ffi.dlopen(self.extmod)
        assert lib.globalconst42 == 4321
        # Integer constants have no address to take.
        raises(AttributeError, ffi.addressof, lib, 'globalconst42')

    def test_global_const_nonint(self):
        self.fix_path()
        from re_python_pysrc import ffi
        lib = ffi.dlopen(self.extmod)
        assert ffi.string(lib.globalconsthello, 8) == "hello"
        raises(AttributeError, ffi.addressof, lib, 'globalconsthello')

    def test_rtld_constants(self):
        self.fix_path()
        from re_python_pysrc import ffi
        ffi.RTLD_NOW    # check that we have the attributes
        ffi.RTLD_LAZY
        ffi.RTLD_GLOBAL

    def test_no_such_function_or_global_var(self):
        self.fix_path()
        from re_python_pysrc import ffi
        lib = ffi.dlopen(self.extmod)
        # Declared in the cdef but absent from the library: lazy failure
        # on attribute access, not at dlopen time.
        e = raises(ffi.error, getattr, lib, 'no_such_function')
        assert str(e.value).startswith(
            "symbol 'no_such_function' not found in library '")
        e = raises(ffi.error, getattr, lib, 'no_such_globalvar')
        assert str(e.value).startswith(
            "symbol 'no_such_globalvar' not found in library '")

    def test_check_version(self):
        import _cffi_backend
        e = raises(ImportError, _cffi_backend.FFI,
                   "foobar", _version=0x2594)
        assert str(e.value).startswith(
            "cffi out-of-line Python module 'foobar' has unknown version")
| 35.672897 | 87 | 0.592874 |
198d03494610af81fb846d62298cb443e115fe82 | 22,467 | py | Python | pdfminer/layout.py | hason-contributions/pdfminer.six | fd63dbf62e291f6cba5beda968e6f9c480f20033 | [
"MIT"
] | null | null | null | pdfminer/layout.py | hason-contributions/pdfminer.six | fd63dbf62e291f6cba5beda968e6f9c480f20033 | [
"MIT"
] | null | null | null | pdfminer/layout.py | hason-contributions/pdfminer.six | fd63dbf62e291f6cba5beda968e6f9c480f20033 | [
"MIT"
] | null | null | null |
from .utils import INF
from .utils import Plane
from .utils import get_bound
from .utils import uniq
from .utils import csort
from .utils import fsplit
from .utils import bbox2str
from .utils import matrix2str
from .utils import apply_matrix_pt
import six # Python 2+3 compatibility
## IndexAssigner
##
class IndexAssigner(object):
    """Walks a layout tree and numbers every LTTextBox in traversal order."""

    def __init__(self, index=0):
        self.index = index

    def run(self, obj):
        """Assign self.index to text boxes, recursing into text groups."""
        if isinstance(obj, LTTextBox):
            obj.index = self.index
            self.index += 1
        elif isinstance(obj, LTTextGroup):
            for child in obj:
                self.run(child)
## LAParams
##
class LAParams(object):
    """Tunable thresholds for the layout analysis algorithm."""

    def __init__(self, line_overlap=0.5, char_margin=2.0, line_margin=0.5,
                 word_margin=0.1, boxes_flow=0.5, detect_vertical=False,
                 all_texts=False):
        # Thresholds controlling how characters group into lines/boxes.
        self.line_overlap = line_overlap
        self.char_margin = char_margin
        self.line_margin = line_margin
        self.word_margin = word_margin
        self.boxes_flow = boxes_flow
        # Feature switches.
        self.detect_vertical = detect_vertical
        self.all_texts = all_texts

    def __repr__(self):
        return ('<LAParams: char_margin={:.1f}, line_margin={:.1f}, '
                'word_margin={:.1f} all_texts={!r}>'.format(
                    self.char_margin, self.line_margin,
                    self.word_margin, self.all_texts))
## LTItem
##
class LTItem(object):
    """Root interface for every object produced by layout analysis."""

    def analyze(self, laparams):
        """Perform the layout analysis; the base implementation is a no-op."""
## LTText
##
class LTText(object):
    """Interface for objects that carry extractable text."""

    def __repr__(self):
        return '<{} {!r}>'.format(self.__class__.__name__, self.get_text())

    def get_text(self):
        """Return the text content; subclasses must override."""
        raise NotImplementedError
## LTComponent
##
class LTComponent(LTItem):
    """A layout object occupying a rectangular bounding box.

    Coordinates follow PDF conventions: (x0, y0) is the lower-left corner
    and (x1, y1) the upper-right, so y grows upward.
    """

    def __init__(self, bbox):
        LTItem.__init__(self)
        self.set_bbox(bbox)
        return

    def __repr__(self):
        return ('<%s %s>' %
                (self.__class__.__name__, bbox2str(self.bbox)))

    # Disable comparison: ordering of layout components is intentionally
    # undefined, so any attempt to compare them is a programming error.
    def __lt__(self, _):
        raise ValueError

    def __le__(self, _):
        raise ValueError

    def __gt__(self, _):
        raise ValueError

    def __ge__(self, _):
        raise ValueError

    def set_bbox(self, bbox):
        """Store the bounding box and the derived corner/size attributes."""
        (x0, y0, x1, y1) = bbox
        self.x0 = x0
        self.y0 = y0
        self.x1 = x1
        self.y1 = y1
        self.width = x1-x0
        self.height = y1-y0
        self.bbox = bbox
        return

    def is_empty(self):
        # Degenerate boxes (zero or negative extent) count as empty.
        return self.width <= 0 or self.height <= 0

    def is_hoverlap(self, obj):
        """True if the boxes overlap when projected onto the x-axis."""
        assert isinstance(obj, LTComponent)
        return obj.x0 <= self.x1 and self.x0 <= obj.x1

    def hdistance(self, obj):
        """Horizontal gap between the two boxes (0 when they overlap)."""
        assert isinstance(obj, LTComponent)
        if self.is_hoverlap(obj):
            return 0
        else:
            return min(abs(self.x0-obj.x1), abs(self.x1-obj.x0))

    def hoverlap(self, obj):
        """Width of the horizontal overlap (0 when disjoint)."""
        assert isinstance(obj, LTComponent)
        if self.is_hoverlap(obj):
            return min(abs(self.x0-obj.x1), abs(self.x1-obj.x0))
        else:
            return 0

    def is_voverlap(self, obj):
        """True if the boxes overlap when projected onto the y-axis."""
        assert isinstance(obj, LTComponent)
        return obj.y0 <= self.y1 and self.y0 <= obj.y1

    def vdistance(self, obj):
        """Vertical gap between the two boxes (0 when they overlap)."""
        assert isinstance(obj, LTComponent)
        if self.is_voverlap(obj):
            return 0
        else:
            return min(abs(self.y0-obj.y1), abs(self.y1-obj.y0))

    def voverlap(self, obj):
        """Height of the vertical overlap (0 when disjoint)."""
        assert isinstance(obj, LTComponent)
        if self.is_voverlap(obj):
            return min(abs(self.y0-obj.y1), abs(self.y1-obj.y0))
        else:
            return 0
## LTCurve
##
class LTCurve(LTComponent):
    """A generic path (polyline), bounded by its control points.

    stroke/fill/evenodd mirror the painting flags of the PDF graphics
    state; colors are kept as provided by the interpreter.
    """

    def __init__(self, linewidth, pts, stroke = False, fill = False, evenodd = False, stroking_color = None, non_stroking_color = None):
        LTComponent.__init__(self, get_bound(pts))
        self.pts = pts
        self.linewidth = linewidth
        self.stroke = stroke
        self.fill = fill
        self.evenodd = evenodd
        self.stroking_color = stroking_color
        self.non_stroking_color = non_stroking_color
        return

    def get_pts(self):
        # Serialize the points as "x,y,x,y,..." with 3 decimal places.
        return ','.join('%.3f,%.3f' % p for p in self.pts)
## LTLine
##
class LTLine(LTCurve):
    """A single straight segment from p0 to p1."""

    def __init__(self, linewidth, p0, p1, stroke=False, fill=False,
                 evenodd=False, stroking_color=None, non_stroking_color=None):
        LTCurve.__init__(self, linewidth, [p0, p1], stroke, fill, evenodd,
                         stroking_color, non_stroking_color)
## LTRect
##
class LTRect(LTCurve):
    """An axis-aligned rectangle, stored as its four corner points."""

    def __init__(self, linewidth, bbox, stroke=False, fill=False,
                 evenodd=False, stroking_color=None, non_stroking_color=None):
        (x0, y0, x1, y1) = bbox
        corners = [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]
        LTCurve.__init__(self, linewidth, corners, stroke, fill, evenodd,
                         stroking_color, non_stroking_color)
## LTImage
##
class LTImage(LTComponent):
    """An embedded image placed on the page.

    Metadata is read from the PDF stream dictionary; each key is looked up
    under both its abbreviated and spelled-out name.
    """

    def __init__(self, name, stream, bbox):
        LTComponent.__init__(self, bbox)
        self.name = name
        self.stream = stream
        # (width, height) of the source raster, in pixels.
        self.srcsize = (stream.get_any(('W', 'Width')),
                        stream.get_any(('H', 'Height')))
        self.imagemask = stream.get_any(('IM', 'ImageMask'))
        self.bits = stream.get_any(('BPC', 'BitsPerComponent'), 1)
        self.colorspace = stream.get_any(('CS', 'ColorSpace'))
        # Normalize the colorspace to a list (it may be a single name).
        if not isinstance(self.colorspace, list):
            self.colorspace = [self.colorspace]
        return

    def __repr__(self):
        return ('<%s(%s) %s %r>' %
                (self.__class__.__name__, self.name,
                 bbox2str(self.bbox), self.srcsize))
## LTAnno
##
class LTAnno(LTItem, LTText):
    """A virtual character (e.g. an inserted space or newline) that exists
    only in the extracted text and has no bounding box on the page."""

    def __init__(self, text):
        self._text = text

    def get_text(self):
        return self._text
## LTChar
##
class LTChar(LTComponent, LTText):
    """An actual glyph placed on the page by a text-showing operator.

    The bounding box is computed in text space from the font metrics and
    then mapped to device space through the text matrix.
    """

    def __init__(self, matrix, font, fontsize, scaling, rise,
                 text, textwidth, textdisp):
        LTText.__init__(self)
        self._text = text
        self.matrix = matrix
        self.fontname = font.fontname
        # Advance: how far the text cursor moves after this glyph.
        self.adv = textwidth * fontsize * scaling
        # compute the boundary rectangle.
        if font.is_vertical():
            # vertical writing mode: textdisp is the glyph displacement
            # (vx, vy); vx defaults to half the glyph width when absent,
            # and metric values are in 1/1000 units of text space.
            width = font.get_width() * fontsize
            (vx, vy) = textdisp
            if vx is None:
                vx = width * 0.5
            else:
                vx = vx * fontsize * .001
            vy = (1000 - vy) * fontsize * .001
            tx = -vx
            ty = vy + rise
            bll = (tx, ty+self.adv)
            bur = (tx+width, ty)
        else:
            # horizontal writing mode: the box spans from the font's
            # descent up to its ascent.
            height = font.get_height() * fontsize
            descent = font.get_descent() * fontsize
            ty = descent + rise
            bll = (0, ty)
            bur = (self.adv, ty+height)
        (a, b, c, d, e, f) = self.matrix
        # "upright" = the glyph is not rotated/mirrored by the text matrix.
        self.upright = (0 < a*d*scaling and b*c <= 0)
        (x0, y0) = apply_matrix_pt(self.matrix, bll)
        (x1, y1) = apply_matrix_pt(self.matrix, bur)
        # Normalize so (x0, y0) is always the lower-left corner.
        if x1 < x0:
            (x0, x1) = (x1, x0)
        if y1 < y0:
            (y0, y1) = (y1, y0)
        LTComponent.__init__(self, (x0, y0, x1, y1))
        # Nominal glyph size along the writing direction.
        if font.is_vertical():
            self.size = self.width
        else:
            self.size = self.height
        return

    def __repr__(self):
        return ('<%s %s matrix=%s font=%r adv=%s text=%r>' %
                (self.__class__.__name__, bbox2str(self.bbox),
                 matrix2str(self.matrix), self.fontname, self.adv,
                 self.get_text()))

    def get_text(self):
        return self._text

    def is_compatible(self, obj):
        """Returns True if two characters can coexist in the same line."""
        return True
## LTContainer
##
class LTContainer(LTComponent):

    """A layout component that aggregates child layout objects."""

    def __init__(self, bbox):
        LTComponent.__init__(self, bbox)
        self._objs = []
        return

    def __len__(self):
        return len(self._objs)

    def __iter__(self):
        return iter(self._objs)

    def add(self, obj):
        """Append one child object."""
        self._objs.append(obj)
        return

    def extend(self, objs):
        """Append every object in *objs*, going through add()."""
        for item in objs:
            self.add(item)
        return

    def analyze(self, laparams):
        """Recursively analyze all children with the given layout params."""
        for child in self._objs:
            child.analyze(laparams)
        return
## LTExpandableContainer
##
class LTExpandableContainer(LTContainer):

    """Container whose bbox grows to the union of its children's bboxes."""

    def __init__(self):
        # Start from an "inside-out" bbox so the first add() defines
        # the initial extent.
        LTContainer.__init__(self, (+INF, +INF, -INF, -INF))
        return

    def add(self, obj):
        LTContainer.add(self, obj)
        union = (min(self.x0, obj.x0), min(self.y0, obj.y0),
                 max(self.x1, obj.x1), max(self.y1, obj.y1))
        self.set_bbox(union)
        return
## LTTextContainer
##
class LTTextContainer(LTExpandableContainer, LTText):

    """Expandable container whose text is the concatenation of the text
    of its LTText children."""

    def __init__(self):
        LTText.__init__(self)
        LTExpandableContainer.__init__(self)
        return

    def get_text(self):
        parts = (obj.get_text() for obj in self if isinstance(obj, LTText))
        return ''.join(parts)
## LTTextLine
##
class LTTextLine(LTTextContainer):

    """One line of text; the concrete direction (horizontal/vertical)
    is handled by the subclasses below."""

    def __init__(self, word_margin):
        LTTextContainer.__init__(self)
        self.word_margin = word_margin
        return

    def __repr__(self):
        return ('<%s %s %r>' %
                (self.__class__.__name__, bbox2str(self.bbox),
                 self.get_text()))

    def analyze(self, laparams):
        LTTextContainer.analyze(self, laparams)
        # Terminate every analyzed line with a virtual newline character.
        LTContainer.add(self, LTAnno('\n'))
        return

    def find_neighbors(self, plane, ratio):
        """Return nearby lines of the same orientation (subclass hook)."""
        raise NotImplementedError
class LTTextLineHorizontal(LTTextLine):

    """Left-to-right text line; inserts virtual spaces at word gaps."""

    def __init__(self, word_margin):
        LTTextLine.__init__(self, word_margin)
        self._x1 = +INF  # right edge of the previously added object
        return

    def add(self, obj):
        if isinstance(obj, LTChar) and self.word_margin:
            gap = self.word_margin * max(obj.width, obj.height)
            if self._x1 < obj.x0 - gap:
                # The new glyph starts well past the previous one:
                # emit a virtual space between the two words.
                LTContainer.add(self, LTAnno(' '))
        self._x1 = obj.x1
        LTTextLine.add(self, obj)
        return

    def find_neighbors(self, plane, ratio):
        d = ratio * self.height
        candidates = plane.find((self.x0, self.y0 - d, self.x1, self.y1 + d))
        neighbors = []
        for obj in candidates:
            # Same orientation, similar height, and roughly aligned on
            # either the left or the right edge.
            if (isinstance(obj, LTTextLineHorizontal) and
                    abs(obj.height - self.height) < d and
                    (abs(obj.x0 - self.x0) < d or
                     abs(obj.x1 - self.x1) < d)):
                neighbors.append(obj)
        return neighbors
class LTTextLineVertical(LTTextLine):

    """Top-to-bottom text line; inserts virtual spaces at word gaps."""

    def __init__(self, word_margin):
        LTTextLine.__init__(self, word_margin)
        self._y0 = -INF  # bottom edge of the previously added object
        return

    def add(self, obj):
        if isinstance(obj, LTChar) and self.word_margin:
            gap = self.word_margin * max(obj.width, obj.height)
            if obj.y1 + gap < self._y0:
                # The new glyph starts well below the previous one:
                # emit a virtual space between the two words.
                LTContainer.add(self, LTAnno(' '))
        self._y0 = obj.y0
        LTTextLine.add(self, obj)
        return

    def find_neighbors(self, plane, ratio):
        d = ratio * self.width
        candidates = plane.find((self.x0 - d, self.y0, self.x1 + d, self.y1))
        neighbors = []
        for obj in candidates:
            # Same orientation, similar width, and roughly aligned on
            # either the top or the bottom edge.
            if (isinstance(obj, LTTextLineVertical) and
                    abs(obj.width - self.width) < d and
                    (abs(obj.y0 - self.y0) < d or
                     abs(obj.y1 - self.y1) < d)):
                neighbors.append(obj)
        return neighbors
## LTTextBox
##
## A set of text objects that are grouped within
## a certain rectangular area.
##
class LTTextBox(LTTextContainer):

    """A set of text lines grouped within one rectangular region.

    ``index`` is the reading-order position assigned during layout
    analysis; -1 means not yet assigned.
    """

    def __init__(self):
        LTTextContainer.__init__(self)
        self.index = -1
        return

    def __repr__(self):
        return ('<%s(%s) %s %r>' %
                (self.__class__.__name__,
                 self.index, bbox2str(self.bbox), self.get_text()))
class LTTextBoxHorizontal(LTTextBox):

    """Text box holding horizontal lines, read top to bottom."""

    def analyze(self, laparams):
        LTTextBox.analyze(self, laparams)
        # Stable-sort lines by descending top edge (page y grows upward).
        self._objs = csort(self._objs, key=lambda line: -line.y1)
        return

    def get_writing_mode(self):
        return 'lr-tb'
class LTTextBoxVertical(LTTextBox):

    """Text box holding vertical lines, read right to left."""

    def analyze(self, laparams):
        LTTextBox.analyze(self, laparams)
        # Stable-sort lines by descending right edge.
        self._objs = csort(self._objs, key=lambda line: -line.x1)
        return

    def get_writing_mode(self):
        return 'tb-rl'
## LTTextGroup
##
class LTTextGroup(LTTextContainer):

    """A hierarchical grouping of text boxes and/or sub-groups."""

    def __init__(self, objs):
        LTTextContainer.__init__(self)
        self.extend(objs)
        return
class LTTextGroupLRTB(LTTextGroup):
    """Group whose members are read left-to-right, top-to-bottom."""
    def analyze(self, laparams):
        LTTextGroup.analyze(self, laparams)
        # reorder the objects from top-left to bottom-right.
        # laparams.boxes_flow weighs horizontal position against vertical
        # position in the ordering key below.  NOTE(review): the exact
        # -1..+1 interpretation is inferred from the weights -- confirm
        # against the LAParams documentation.
        self._objs = csort(self._objs, key=lambda obj:
                           (1-laparams.boxes_flow)*(obj.x0) -
                           (1+laparams.boxes_flow)*(obj.y0+obj.y1))
        return
class LTTextGroupTBRL(LTTextGroup):
    """Group whose members are read top-to-bottom, right-to-left."""
    def analyze(self, laparams):
        LTTextGroup.analyze(self, laparams)
        # reorder the objects from top-right to bottom-left.
        # Mirror image of the LRTB key: x and y swap roles for vertical text.
        self._objs = csort(self._objs, key=lambda obj:
                           -(1+laparams.boxes_flow)*(obj.x0+obj.x1)
                           - (1-laparams.boxes_flow)*(obj.y1))
        return
## LTLayoutContainer
##
class LTLayoutContainer(LTContainer):
    """Container that performs layout analysis on its children:
    chars -> text lines -> text boxes -> (optionally) nested groups.
    """
    def __init__(self, bbox):
        LTContainer.__init__(self, bbox)
        # Set by analyze() when hierarchical grouping is enabled.
        self.groups = None
        return
    # group_objects: group text object to textlines.
    def group_objects(self, laparams, objs):
        """Generator: merge consecutive compatible chars into text lines.

        NOTE(review): if *objs* is empty, the final line.add(obj0) call
        receives None -- callers appear to guard against empty input
        (see analyze() below); confirm before reusing elsewhere.
        """
        obj0 = None
        line = None
        for obj1 in objs:
            if obj0 is not None:
                # halign: obj0 and obj1 is horizontally aligned.
                #
                #   +------+ - - -
                #   | obj0 | - - +------+   -
                #   |      |     | obj1 |   | (line_overlap)
                #   +------+ - - |      |   -
                #          - - - +------+
                #
                #          |<--->|
                #        (char_margin)
                halign = (obj0.is_compatible(obj1) and
                          obj0.is_voverlap(obj1) and
                          (min(obj0.height, obj1.height) * laparams.line_overlap <
                           obj0.voverlap(obj1)) and
                          (obj0.hdistance(obj1) <
                           max(obj0.width, obj1.width) * laparams.char_margin))
                # valign: obj0 and obj1 is vertically aligned.
                #
                #   +------+
                #   | obj0 |
                #   |      |
                #   +------+ - - -
                #   |      |     | (char_margin)
                #   +------+ - -
                #   | obj1 |
                #   |      |
                #   +------+
                #
                #   |<-->|
                #   (line_overlap)
                valign = (laparams.detect_vertical and
                          obj0.is_compatible(obj1) and
                          obj0.is_hoverlap(obj1) and
                          (min(obj0.width, obj1.width) * laparams.line_overlap <
                           obj0.hoverlap(obj1)) and
                          (obj0.vdistance(obj1) <
                           max(obj0.height, obj1.height) * laparams.char_margin))
                if ((halign and isinstance(line, LTTextLineHorizontal)) or
                        (valign and isinstance(line, LTTextLineVertical))):
                    # The pair continues the current line in its direction.
                    line.add(obj1)
                elif line is not None:
                    # Alignment broke: flush the current line.
                    yield line
                    line = None
                else:
                    # No open line: start one in the unambiguous direction,
                    # or fall back to a single-char horizontal line.
                    if valign and not halign:
                        line = LTTextLineVertical(laparams.word_margin)
                        line.add(obj0)
                        line.add(obj1)
                    elif halign and not valign:
                        line = LTTextLineHorizontal(laparams.word_margin)
                        line.add(obj0)
                        line.add(obj1)
                    else:
                        line = LTTextLineHorizontal(laparams.word_margin)
                        line.add(obj0)
                        yield line
                        line = None
            obj0 = obj1
        # Flush the trailing line (wrapping the last char if necessary).
        if line is None:
            line = LTTextLineHorizontal(laparams.word_margin)
            line.add(obj0)
        yield line
        return
    # group_textlines: group neighboring lines to textboxes.
    def group_textlines(self, laparams, lines):
        """Generator: cluster mutually-neighboring lines into text boxes."""
        plane = Plane(self.bbox)
        plane.extend(lines)
        # boxes maps each line to the box it currently belongs to.
        boxes = {}
        for line in lines:
            neighbors = line.find_neighbors(plane, laparams.line_margin)
            if line not in neighbors: continue
            members = []
            for obj1 in neighbors:
                members.append(obj1)
                # Merge any box a neighbor already belongs to.
                if obj1 in boxes:
                    members.extend(boxes.pop(obj1))
            if isinstance(line, LTTextLineHorizontal):
                box = LTTextBoxHorizontal()
            else:
                box = LTTextBoxVertical()
            for obj in uniq(members):
                box.add(obj)
                boxes[obj] = box
        # Emit each distinct, non-empty box once, in line order.
        done = set()
        for line in lines:
            if line not in boxes: continue
            box = boxes[line]
            if box in done:
                continue
            done.add(box)
            if not box.is_empty():
                yield box
        return
    # group_textboxes: group textboxes hierarchically.
    def group_textboxes(self, laparams, boxes):
        """Repeatedly merge the two 'closest' boxes into binary groups,
        returning the single root group in a one-element list."""
        assert boxes
        def dist(obj1, obj2):
            """A distance function between two TextBoxes.

            Consider the bounding rectangle for obj1 and obj2.
            Return its area less the areas of obj1 and obj2,
            shown as 'www' below. This value may be negative.
                    +------+..........+ (x1, y1)
                    | obj1 |wwwwwwwwww:
                    +------+www+------+
                    :wwwwwwwwww| obj2 |
            (x0, y0) +..........+------+
            """
            x0 = min(obj1.x0, obj2.x0)
            y0 = min(obj1.y0, obj2.y0)
            x1 = max(obj1.x1, obj2.x1)
            y1 = max(obj1.y1, obj2.y1)
            return ((x1-x0)*(y1-y0) - obj1.width*obj1.height - obj2.width*obj2.height)
        def isany(obj1, obj2):
            """Check if there's any other object between obj1 and obj2."""
            x0 = min(obj1.x0, obj2.x0)
            y0 = min(obj1.y0, obj2.y0)
            x1 = max(obj1.x1, obj2.x1)
            y1 = max(obj1.y1, obj2.y1)
            objs = set(plane.find((x0, y0, x1, y1)))
            return objs.difference((obj1, obj2))
        def key_obj(t):
            # Sort by (priority, distance); priority 1 marks pairs that
            # were deferred because another object lies between them.
            (c,d,_,_) = t
            return (c,d)
        # XXX this still takes O(n^2) :(
        dists = []
        for i in range(len(boxes)):
            obj1 = boxes[i]
            for j in range(i+1, len(boxes)):
                obj2 = boxes[j]
                dists.append((0, dist(obj1, obj2), obj1, obj2))
        # We could use dists.sort(), but it would randomize the test result.
        dists = csort(dists, key=key_obj)
        plane = Plane(self.bbox)
        plane.extend(boxes)
        while dists:
            (c, d, obj1, obj2) = dists.pop(0)
            # Defer pairs that have another object between them.
            if c == 0 and isany(obj1, obj2):
                dists.append((1, d, obj1, obj2))
                continue
            # Any vertical member makes the whole group top-to-bottom/RL.
            if (isinstance(obj1, (LTTextBoxVertical, LTTextGroupTBRL)) or
                isinstance(obj2, (LTTextBoxVertical, LTTextGroupTBRL))):
                group = LTTextGroupTBRL([obj1, obj2])
            else:
                group = LTTextGroupLRTB([obj1, obj2])
            plane.remove(obj1)
            plane.remove(obj2)
            # Drop pairs that involve an object no longer in the plane,
            # then add distances from the new group to everything else.
            dists = [ (c,d,obj1,obj2) for (c,d,obj1,obj2) in dists
                      if (obj1 in plane and obj2 in plane) ]
            for other in plane:
                dists.append((0, dist(group, other), group, other))
            dists = csort(dists, key=key_obj)
            plane.add(group)
        assert len(plane) == 1
        return list(plane)
    def analyze(self, laparams):
        """Run the full layout analysis pipeline over the children."""
        # textobjs is a list of LTChar objects, i.e.
        # it has all the individual characters in the page.
        (textobjs, otherobjs) = fsplit(lambda obj: isinstance(obj, LTChar), self)
        for obj in otherobjs:
            obj.analyze(laparams)
        if not textobjs:
            return
        textlines = list(self.group_objects(laparams, textobjs))
        (empties, textlines) = fsplit(lambda obj: obj.is_empty(), textlines)
        for obj in empties:
            obj.analyze(laparams)
        textboxes = list(self.group_textlines(laparams, textlines))
        if -1 <= laparams.boxes_flow and laparams.boxes_flow <= +1 and textboxes:
            # Hierarchical grouping, then index boxes in reading order.
            self.groups = self.group_textboxes(laparams, textboxes)
            assigner = IndexAssigner()
            for group in self.groups:
                group.analyze(laparams)
                assigner.run(group)
            textboxes.sort(key=lambda box: box.index)
        else:
            # Grouping disabled: sort boxes geometrically instead
            # (vertical boxes first, right-to-left; then horizontal).
            def getkey(box):
                if isinstance(box, LTTextBoxVertical):
                    return (0, -box.x1, box.y0)
                else:
                    return (1, box.y0, box.x0)
            textboxes.sort(key=getkey)
        self._objs = textboxes + otherobjs + empties
        return
## LTFigure
##
class LTFigure(LTLayoutContainer):

    """A named rectangular area whose bbox is given as (x, y, w, h) and
    mapped onto the page through *matrix* before use."""

    def __init__(self, name, bbox, matrix):
        self.name = name
        self.matrix = matrix
        (x, y, w, h) = bbox
        # Transform all four corners and take their bound, since the
        # matrix may rotate or shear the rectangle.
        corners = ((x, y), (x+w, y), (x, y+h), (x+w, y+h))
        bbox = get_bound(apply_matrix_pt(matrix, (p, q)) for (p, q) in corners)
        LTLayoutContainer.__init__(self, bbox)
        return

    def __repr__(self):
        return ('<%s(%s) %s matrix=%s>' %
                (self.__class__.__name__, self.name,
                 bbox2str(self.bbox), matrix2str(self.matrix)))

    def analyze(self, laparams):
        # Text inside figures is only analyzed when explicitly requested.
        if not laparams.all_texts:
            return
        LTLayoutContainer.analyze(self, laparams)
        return
## LTPage
##
class LTPage(LTLayoutContainer):

    """The root layout container for one page of the document."""

    def __init__(self, pageid, bbox, rotate=0):
        LTLayoutContainer.__init__(self, bbox)
        self.pageid = pageid
        self.rotate = rotate  # page rotation as given, default 0
        return

    def __repr__(self):
        return ('<%s(%r) %s rotate=%r>' %
                (self.__class__.__name__, self.pageid,
                 bbox2str(self.bbox), self.rotate))
| 30.278976 | 142 | 0.53336 |
18012d0d6593846865b2666775666824ae35d768 | 2,626 | py | Python | 02_Recursive-decline-method/app/core.py | Mon-ius/Compiling-principle | 02ca432c3db143c02871fbca781654f9782ee464 | [
"MIT"
] | null | null | null | 02_Recursive-decline-method/app/core.py | Mon-ius/Compiling-principle | 02ca432c3db143c02871fbca781654f9782ee464 | [
"MIT"
] | 17 | 2020-01-28T22:21:54.000Z | 2020-03-30T20:29:39.000Z | 02_Recursive-decline-method/app/core.py | Mon-ius/Compiling-principle | 02ca432c3db143c02871fbca781654f9782ee464 | [
"MIT"
] | null | null | null | import re
import collections
# Token specification
# One named group per token type; the group name (Match.lastgroup)
# doubles as the token type in generate_tokens() below.
NUM = r'(?P<NUM>\d+)'
PLUS = r'(?P<PLUS>\+)'
MINUS = r'(?P<MINUS>-)'
TIMES = r'(?P<TIMES>\*)'
DIVIDE = r'(?P<DIVIDE>/)'
LPAREN = r'(?P<LPAREN>\()'
RPAREN = r'(?P<RPAREN>\))'
WS = r'(?P<WS>\s+)'
# Master pattern: alternation of every token pattern above.
master_pat = re.compile('|'.join([NUM, PLUS, MINUS, TIMES,
                                  DIVIDE, LPAREN, RPAREN, WS]))
# Tokenizer
# A token is a (type, value) pair, e.g. Token('NUM', '42').
Token = collections.namedtuple('Token', ['type', 'value'])
def generate_tokens(text):
    """Yield Token(type, value) pairs for *text*, skipping whitespace.

    Scanning stops silently at the first character no pattern matches.
    """
    scan = master_pat.scanner(text)
    match = scan.match()
    while match is not None:
        if match.lastgroup != 'WS':
            yield Token(match.lastgroup, match.group())
        match = scan.match()
# Parser
class ExpressionEvaluator:
    """Recursive-descent evaluator for infix arithmetic expressions.

    Each method implements one grammar rule:

        expression ::= term { ('+'|'-') term }*
        term       ::= factor { ('*'|'/') factor }*
        factor     ::= NUM | '(' expression ')'

    parse() returns the numeric value of the expression and raises
    SyntaxError on malformed input.
    """

    def parse(self, text):
        """Tokenize *text*, evaluate it, and return the result."""
        self.tokens = generate_tokens(text)
        self.tok = None       # Last symbol consumed
        self.nexttok = None   # Next symbol tokenized
        self._advance()       # Load first lookahead token
        exprval = self.expr()
        # Bug fix: input such as '2 3' used to be silently truncated to the
        # leading expression; leftover tokens are now rejected.
        # NOTE(review): characters the tokenizer does not recognize still
        # end the token stream early and cannot be detected here.
        if self.nexttok is not None:
            raise SyntaxError('Unexpected token %r after expression'
                              % (self.nexttok.value,))
        return exprval

    def _advance(self):
        'Advance one token ahead'
        self.tok, self.nexttok = self.nexttok, next(self.tokens, None)

    def _accept(self, toktype):
        'Test and consume the next token if it matches toktype'
        if self.nexttok and self.nexttok.type == toktype:
            self._advance()
            return True
        else:
            return False

    def _expect(self, toktype):
        'Consume next token if it matches toktype or raise SyntaxError'
        if not self._accept(toktype):
            raise SyntaxError('Expected ' + toktype)

    # Grammar rules follow

    def expr(self):
        "expression ::= term { ('+'|'-') term }*"
        exprval = self.term()
        while self._accept('PLUS') or self._accept('MINUS'):
            op = self.tok.type
            right = self.term()
            if op == 'PLUS':
                exprval += right
            elif op == 'MINUS':
                exprval -= right
        return exprval

    def term(self):
        "term ::= factor { ('*'|'/') factor }*"
        termval = self.factor()
        while self._accept('TIMES') or self._accept('DIVIDE'):
            op = self.tok.type
            right = self.factor()
            if op == 'TIMES':
                termval *= right
            elif op == 'DIVIDE':
                # True division: '7/2' evaluates to 3.5.
                termval /= right
        return termval

    def factor(self):
        "factor ::= NUM | ( expr )"
        if self._accept('NUM'):
            return int(self.tok.value)
        elif self._accept('LPAREN'):
            exprval = self.expr()
            self._expect('RPAREN')
            return exprval
        else:
            raise SyntaxError('Expected NUMBER or LPAREN')
| 29.505618 | 71 | 0.541508 |
b1f62e5b1c486c37fa1aa6f847366c5229cb8240 | 19,329 | py | Python | python/ee/data.py | yanggis/yanggis-earthengine-api | f4c0d6433399c7dfcfd151513ed4ed24d7ba4589 | [
"Apache-2.0"
] | 1 | 2018-09-22T18:03:25.000Z | 2018-09-22T18:03:25.000Z | python/ee/data.py | yanggis/yanggis-earthengine-api | f4c0d6433399c7dfcfd151513ed4ed24d7ba4589 | [
"Apache-2.0"
] | null | null | null | python/ee/data.py | yanggis/yanggis-earthengine-api | f4c0d6433399c7dfcfd151513ed4ed24d7ba4589 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Singleton for all of the library's communcation with the Earth Engine API."""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
import json
import urllib
import deprecation
import ee_exception
import httplib2
# --- Module-level state, set by initialize() and cleared by reset(). ---
# OAuth2 credentials object. This may be set by ee.Initialize().
_credentials = None
# The base URL for all data calls. This is set by ee.initialize().
_api_base_url = None
# The base URL for map tiles. This is set by ee.initialize().
_tile_base_url = None
# Whether the module has been initialized.
_initialized = False
# Sets the number of milliseconds to wait for a request before considering
# it timed out. 0 means no limit.
_deadline_ms = 0
# The default base URL for API calls, used when initialize() is not
# given an explicit api_base_url.
DEFAULT_API_BASE_URL = 'https://earthengine.googleapis.com/api'
# The default base URL for media/tile calls.
DEFAULT_TILE_BASE_URL = 'https://earthengine.googleapis.com/'
def initialize(credentials=None, api_base_url=None, tile_base_url=None):
  """Initializes the data module, setting credentials and base URLs.

  If any of the arguments are unspecified, they will keep their old values;
  the defaults if initialize() has never been called before.

  Args:
    credentials: The OAuth2 credentials.
    api_base_url: The EarthEngine REST API endpoint.
    tile_base_url: The EarthEngine REST tile endpoint.
  """
  global _api_base_url, _tile_base_url, _credentials, _initialized
  # An explicitly supplied value always wins; otherwise each URL falls
  # back to its default, but only on the very first call so that later
  # calls do not clobber earlier configuration.
  if credentials is not None:
    _credentials = credentials
  if tile_base_url is not None:
    _tile_base_url = tile_base_url
  elif not _initialized:
    _tile_base_url = DEFAULT_TILE_BASE_URL
  if api_base_url is not None:
    _api_base_url = api_base_url
  elif not _initialized:
    _api_base_url = DEFAULT_API_BASE_URL
  _initialized = True
def reset():
  """Resets the data module, clearing credentials and custom base URLs."""
  global _api_base_url, _tile_base_url, _credentials, _initialized
  _credentials = _api_base_url = _tile_base_url = None
  _initialized = False
def setDeadline(milliseconds):
  """Sets the timeout length for API requests.

  Args:
    milliseconds: The number of milliseconds to wait for a request
        before considering it timed out. 0 means no limit.
  """
  # Consumed by send_(), which converts it to whole seconds for httplib2.
  global _deadline_ms
  _deadline_ms = milliseconds
@deprecation.Deprecated('Use ee.data.getValue()')
def getInfo(asset_id):
  """Load info for an asset, given an asset id.

  Deprecated: use ee.data.getValue() instead.

  Args:
    asset_id: The asset to be retrieved.

  Returns:
    The value call results.
  """
  return send_('/info', {'id': asset_id})
def getList(params):
  """Get a list of contents for a collection asset.

  Args:
    params: An object containing request parameters with the
        following possible values:
            id (string) The asset id of the collection to list.
            starttime (number) Start time, in msec since the epoch.
            endtime (number) End time, in msec since the epoch.
            fields (comma-separated strings) Field names to return.

  Returns:
    The list call results.
  """
  # The filters are applied server-side; params is forwarded unmodified.
  return send_('/list', params)
def getMapId(params):
  """Get a Map ID for a given asset.

  Args:
    params: An object containing visualization options with the
        following possible values:
            image - (JSON string) The image to render.
            version - (number) Version number of image (or latest).
            bands - (comma-seprated strings) Comma-delimited list of
                band names to be mapped to RGB.
            min - (comma-separated numbers) Value (or one per band)
                to map onto 00.
            max - (comma-separated numbers) Value (or one per band)
                to map onto FF.
            gain - (comma-separated numbers) Gain (or one per band)
                to map onto 00-FF.
            bias - (comma-separated numbers) Offset (or one per band)
                to map onto 00-FF.
            gamma - (comma-separated numbers) Gamma correction
                factor (or one per band)
            palette - (comma-separated strings) List of CSS-style color
                strings (single-band previews only).
            format (string) Either 'jpg' (does not support transparency) or
                'png' (supports transparency).

  Returns:
    A dictionary containing "mapid" and "token" strings, which can
    be combined to retrieve tiles from the /map service.
  """
  # Work on a copy so the caller's dictionary is not mutated
  # (consistent with getThumbId, which already copies).
  request = params.copy()
  request['json_format'] = 'v2'
  return send_('/mapid', request)
def getTileUrl(mapid, x, y, z):
  """Generate a URL for map tiles from a Map ID and coordinates.

  Args:
    mapid: The Map ID to generate tiles for, a dictionary containing "mapid"
        and "token" strings.
    x: The tile x coordinate.
    y: The tile y coordinate.
    z: The tile zoom level.

  Returns:
    The tile URL.
  """
  width = 2 ** z
  # Wrap the x coordinate around the antimeridian. Python's % always
  # yields a result with the sign of the (positive) divisor, so the
  # previous "if x < 0: x += width" adjustment was dead code.
  x %= width
  return '%s/map/%s/%d/%d/%d?token=%s' % (
      _tile_base_url, mapid['mapid'], z, x, y, mapid['token'])
def getValue(params):
  """Retrieve a processed value from the front end.

  Args:
    params: A dictionary containing:
        json - (String) A JSON object to be evaluated.

  Returns:
    The value call results.
  """
  # Work on a copy so the caller's dictionary is not mutated
  # (consistent with getThumbId, which already copies).
  request = params.copy()
  request['json_format'] = 'v2'
  return send_('/value', request)
def getThumbnail(params):
  """Get a Thumbnail for a given asset.

  Args:
    params: Parameters identical to getMapId, plus:
        size - (a number or pair of numbers in format WIDTHxHEIGHT) Maximum
          dimensions of the thumbnail to render, in pixels. If only one number
          is passed, it is used as the maximum, and the other dimension is
          computed by proportional scaling.
        region - (E,S,W,N or GeoJSON) Geospatial region of the image
          to render. By default, the whole image.
        format - (string) Either 'png' (default) or 'jpg'.

  Returns:
    A thumbnail image as raw image bytes (PNG unless 'format' says otherwise).
  """
  # opt_raw=True: the response body is the image itself, not JSON.
  return send_('/thumb', params, opt_method='GET', opt_raw=True)
def getThumbId(params):
  """Get a Thumbnail ID for a given asset.

  Args:
    params: Parameters identical to getMapId, plus:
        size - (a number or pair of numbers in format WIDTHxHEIGHT) Maximum
          dimensions of the thumbnail to render, in pixels. If only one number
          is passed, it is used as the maximum, and the other dimension is
          computed by proportional scaling.
        region - (E,S,W,N or GeoJSON) Geospatial region of the image
          to render. By default, the whole image.
        format - (string) Either 'png' (default) or 'jpg'.

  Returns:
    A thumbnail ID.
  """
  # Copy so the caller's dictionary is not mutated.
  request = dict(params)
  request['getid'] = '1'
  request['json_format'] = 'v2'
  # A (width, height) sequence is flattened to the WIDTHxHEIGHT form.
  size = request.get('size')
  if isinstance(size, (list, tuple)):
    request['size'] = 'x'.join(str(dim) for dim in size)
  return send_('/thumb', request)
def makeThumbUrl(thumbId):
  """Create a thumbnail URL from the given thumbid and token.

  Args:
    thumbId: An object containing a thumbnail thumbid and token,
        as returned by getThumbId().

  Returns:
    A URL from which the thumbnail can be obtained.
  """
  # Uses the tile base URL configured via initialize().
  return '%s/api/thumb?thumbid=%s&token=%s' % (
      _tile_base_url, thumbId['thumbid'], thumbId['token'])
def getDownloadId(params):
  """Get a Download ID.

  Args:
    params: An object containing visualization options with the following
        possible values:
      name - a base name to use when constructing filenames.
      bands - a description of the bands to download. Must be an array of
          dictionaries, each with the following keys:
        id - the name of the band, a string, required.
        crs - an optional CRS string defining the band projection.
        crs_transform - an optional array of 6 numbers specifying an affine
            transform from the specified CRS, in the order: xScale,
            yShearing, xShearing, yScale, xTranslation and yTranslation.
        dimensions - an optional array of two integers defining the width and
            height to which the band is cropped.
        scale - an optional number, specifying the scale in meters of the
            band; ignored if crs and crs_transform is specified.
      crs - a default CRS string to use for any bands that do not explicitly
          specify one.
      crs_transform - a default affine transform to use for any bands that do
          not specify one, of the same format as the crs_transform of bands.
      dimensions - default image cropping dimensions to use for any bands
          that do not specify them.
      scale - a default scale to use for any bands that do not specify one;
          ignored if crs and crs_transform is specified.
      region - a polygon specifying a region to download; ignored if crs
          and crs_transform is specified.

  Returns:
    A dict containing a docid and token.
  """
  # Work on a copy so the caller's dictionary is not mutated
  # (the 'bands' entry in particular used to be overwritten in place).
  request = params.copy()
  request['json_format'] = 'v2'
  if 'bands' in request and not isinstance(request['bands'], basestring):
    # Structured band descriptions are serialized for transport.
    request['bands'] = json.dumps(request['bands'])
  return send_('/download', request)
def makeDownloadUrl(downloadId):
  """Create a download URL from the given docid and token.

  Args:
    downloadId: An object containing a download docid and token,
        as returned by getDownloadId().

  Returns:
    A URL from which the download can be obtained.
  """
  # Uses the tile base URL configured via initialize().
  return '%s/api/download?docid=%s&token=%s' % (
      _tile_base_url, downloadId['docid'], downloadId['token'])
def getTableDownloadId(params):
  """Get a Download ID.

  Args:
    params: An object containing table download options with the following
      possible values:
        format - The download format, CSV or JSON.
        selectors - Comma separated string of selectors that can be used to
            determine which attributes will be downloaded.
        filename - The name of the file that will be downloaded.

  Returns:
    A dict containing a docid and token.
  """
  # Work on a copy so the caller's dictionary is not mutated
  # (consistent with getThumbId, which already copies).
  request = params.copy()
  request['json_format'] = 'v2'
  return send_('/table', request)
def makeTableDownloadUrl(downloadId):
  """Create a table download URL from a docid and token.

  Args:
    downloadId: A table download id and token, as returned by
        getTableDownloadId().

  Returns:
    A Url from which the download can be obtained.
  """
  # Uses the tile base URL configured via initialize().
  return '%s/api/table?docid=%s&token=%s' % (
      _tile_base_url, downloadId['docid'], downloadId['token'])
def getAlgorithms():
  """Get the list of algorithms.

  Returns:
    The dictionary of algorithms. Each algorithm is a dictionary containing
    the following fields:
        "description" - (string) A text description of the algorithm.
        "returns" - (string) The return type of the algorithm.
        "args" - An array of arguments. Each argument specifies the following:
            "name" - (string) The name of the argument.
            "description" - (string) A text description of the argument.
            "type" - (string) The type of the argument.
            "optional" - (boolean) Whether the argument is optional or not.
            "default" - A representation of the default value if the argument
                is not specified.
  """
  # Parameterless GET; the server returns the full algorithm catalog.
  return send_('/algorithms', {}, 'GET')
def createAsset(value, opt_path=None):
  """Save an asset.

  Args:
    value: The JSON-serialized value of the asset.
    opt_path: An optional desired ID, including full path.

  Returns:
    A description of the saved asset, including a generated ID.
  """
  request = {'value': value, 'json_format': 'v2'}
  # Only send an explicit ID when the caller asked for one; otherwise
  # the server generates it.
  if opt_path is not None:
    request['id'] = opt_path
  return send_('/create', request)
def newTaskId(count=1):
  """Generate an ID for a long-running task.

  Args:
    count: Optional count of IDs to generate, one by default.

  Returns:
    A list containing generated ID strings.
  """
  return send_('/newtaskid', {'count': count})
def getTaskList():
  """Retrieves a list of the user's tasks.

  Returns:
    A list of task status dictionaries, one for each task submitted to EE by
    the current user. These include currently running tasks as well as recently
    canceled or failed tasks.
  """
  # The response wraps the list under a 'tasks' key.
  return send_('/tasklist', {}, 'GET')['tasks']
def getTaskStatus(taskId):
  """Retrieve status of one or more long-running tasks.

  Args:
    taskId: ID of the task or a list of multiple IDs.

  Returns:
    List containing one object for each queried task, in the same order as
    the input array, each object containing the following values:
      id (string) ID of the task.
      state (string) State of the task, one of READY, RUNNING, COMPLETED,
        FAILED, CANCELLED; or UNKNOWN if the task with the specified ID
        doesn't exist.
      error_message (string) For a FAILED task, a description of the error.
  """
  # Accept either a single ID string or a list of IDs.
  ids = [taskId] if isinstance(taskId, basestring) else taskId
  return send_('/taskstatus', {'q': ','.join(ids)}, 'GET')
def cancelTask(taskId):
  """Cancels a batch task.

  Args:
    taskId: ID of the task to cancel (see newTaskId / getTaskList).
  """
  send_('/updatetask', {'id': taskId, 'action': 'CANCEL'})
def prepareValue(taskId, params):
  """Create processing task which computes a value.

  Args:
    taskId: ID for the task (obtained using newTaskId).
    params: The object that describes the value to be evaluated, with the
      following field:
        json (string) A JSON object to be evaluated.

  Returns:
    A dict with optional notes about the created task.
  """
  # Copy before adding the task ID so the caller's dict is not mutated.
  args = params.copy()
  args['tid'] = taskId
  return send_('/prepare', args)
def startProcessing(taskId, params):
  """Create processing task that exports or pre-renders an image.

  Args:
    taskId: ID for the task (obtained using newTaskId).
    params: The object that describes the processing task; only fields
      that are common for all processing types are documented below.
        type (string) Either 'EXPORT_IMAGE' or 'EXPORT_FEATURES'.
        json (string) JSON description of the image.

  Returns:
    A dict with optional notes about the created task.
  """
  # Copy before adding the task ID so the caller's dict is not mutated.
  args = params.copy()
  args['id'] = taskId
  return send_('/processingrequest', args)
def startIngestion(taskId, request):
  """Creates an asset import task.

  Args:
    taskId: ID for the task (obtained using newTaskId).
    request: The object that describes the import task, which can
        have these fields:
          name (string) The destination asset id (e.g. users/foo/bar).
          filesets (array) A list of Google Cloud Storage source file paths
            formatted like:
              [{'sources': [
                  {'primaryPath': 'foo.tif', 'additionalPaths': ['foo.prj']},
                  {'primaryPath': 'bar.tif', 'additionalPaths': ['bar.prj'},
              ]}]
            Where path values correspond to source files' Google Cloud Storage
            object names, e.g. 'gs://bucketname/filename.tif'
          bands (array) An optional list of band names formatted like:
            [{'name': 'R'}, {'name': 'G'}, {'name': 'B'}]
          extensions (array) An optional list of file extensions formatted like:
            ['tif', 'prj']. Useful if the file names in GCS lack extensions.

  Returns:
    A dict with optional notes about the created task.
  """
  # The request structure is serialized to JSON for transport.
  args = {'id': taskId, 'request': json.dumps(request)}
  return send_('/ingestionrequest', args)
def getAssetRoots():
  """Returns the list of the root folders the user owns.

  Note: The "id" values for roots are two levels deep, e.g. "users/johndoe"
      not "users/johndoe/notaroot".

  Returns:
    A list of folder descriptions formatted like:
      [
          {"type": "Folder", "id": "users/foo"},
          {"type": "Folder", "id": "projects/bar"},
      ]
  """
  # Parameterless GET against the buckets endpoint.
  return send_('/buckets', None, 'GET')
def getAssetAcl(assetId):
  """Returns the access control list of the asset with the given ID.

  Args:
    assetId: The ID of the asset to check.

  Returns:
    A dict describing the asset's ACL. Looks like:
      {
         "owners" : ["user@domain1.com"],
         "writers": ["user2@domain1.com", "user3@domain1.com"],
         "readers": ["some_group@domain2.com"],
         "all_users_can_read" : True
      }
  """
  return send_('/getacl', {'id': assetId}, 'GET')
def setAssetAcl(assetId, aclUpdate):
  """Sets the access control list of the asset with the given ID.

  The owner ACL cannot be changed, and the final ACL of the asset
  is constructed by merging the OWNER entries of the old ACL with
  the incoming ACL record.

  Args:
    assetId: The ID of the asset to set the ACL on.
    aclUpdate: The updated ACL for the asset. Must be formatted like the
        value returned by getAssetAcl but without "owners".
  """
  # Fire-and-forget: the server response is discarded.
  send_('/setacl', {'id': assetId, 'value': aclUpdate})
def createAssetHome(requestedId):
  """Attempts to create a home root folder for the current user ("users/joe").

  Results in an error if the user already has a home root folder or the
  requested ID is unavailable.

  Args:
    requestedId: The requested ID of the home folder (e.g. "users/joe").
  """
  # Server-side errors surface as EEException raised by send_().
  send_('/createbucket', {'id': requestedId})
def send_(path, params, opt_method='POST', opt_raw=False):
"""Send an API call.
Args:
path: The API endpoint to call.
params: The call parameters.
opt_method: The HTTPRequest method (GET or POST).
opt_raw: Whether the data should be returned raw, without attempting
to decode it as JSON.
Returns:
The data object returned by the API call.
Raises:
EEException: For malformed requests or errors from the server.
"""
# Make sure we never perform API calls before initialization.
initialize()
url = _api_base_url + path
payload = urllib.urlencode(params)
http = httplib2.Http(timeout=int(_deadline_ms / 1000) or None)
headers = {}
if _credentials:
http = _credentials.authorize(http)
if opt_method == 'GET':
url = url + ('&' if '?' in url else '?') + payload
payload = None
elif opt_method == 'POST':
headers['Content-type'] = 'application/x-www-form-urlencoded'
else:
raise ee_exception.EEException('Unexpected request method: ' + opt_method)
try:
response, content = http.request(url, method=opt_method, body=payload,
headers=headers)
except httplib2.HttpLib2Error, e:
raise ee_exception.EEException(
'Unexpected HTTP error: %s' % e.message)
# Whether or not the response is an error, it may be JSON.
content_type = (response['content-type'] or 'application/json').split(';')[0]
if content_type in ('application/json', 'text/json') and not opt_raw:
try:
json_content = json.loads(content)
except Exception, e:
raise ee_exception.EEException('Invalid JSON: ' + content)
if 'error' in json_content:
raise ee_exception.EEException(json_content['error']['message'])
if 'data' not in content:
raise ee_exception.EEException('Malformed response: ' + content)
else:
json_content = None
if response.status < 100 or response.status >= 300:
# Note if the response is JSON and contains an error value, we raise that
# error above rather than this generic one.
raise ee_exception.EEException('Server returned HTTP code: %d' %
response.status)
# Now known not to be an error response...
if opt_raw:
return content
elif json_content is None:
raise ee_exception.EEException(
'Response was unexpectedly not JSON, but %s' % response['content-type'])
else:
return json_content['data']
| 31.89604 | 80 | 0.670443 |
4d881a575ceb193c24d8efc41c124c77a2ee3b71 | 53 | py | Python | src/NotYetSelfAware/layers/__init__.py | ezalos/NotYetSelfAware | aa8374d24259be9c93b9b5fc00c07f03538a79df | [
"MIT"
] | 1 | 2021-10-02T09:17:46.000Z | 2021-10-02T09:17:46.000Z | src/NotYetSelfAware/layers/__init__.py | ezalos/NotYetSelfAware | aa8374d24259be9c93b9b5fc00c07f03538a79df | [
"MIT"
] | null | null | null | src/NotYetSelfAware/layers/__init__.py | ezalos/NotYetSelfAware | aa8374d24259be9c93b9b5fc00c07f03538a79df | [
"MIT"
] | null | null | null | from .dense import Dense
from .output import Output
| 13.25 | 26 | 0.792453 |
404c3057e1e03bb7f3bc9805f7979b78cb66b1e2 | 15,205 | py | Python | sdks/python/apache_beam/metrics/cells.py | bobingm/beam | 7dce40187f939424b8249c2c21eaeb4c70c26d31 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sdks/python/apache_beam/metrics/cells.py | bobingm/beam | 7dce40187f939424b8249c2c21eaeb4c70c26d31 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sdks/python/apache_beam/metrics/cells.py | bobingm/beam | 7dce40187f939424b8249c2c21eaeb4c70c26d31 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: language_level=3
"""
This file contains metric cell classes. A metric cell is used to accumulate
in-memory changes to a metric. It represents a specific metric in a single
context.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import threading
import time
from builtins import object
from datetime import datetime
from typing import Any
from typing import Optional
from typing import SupportsInt
try:
  import cython
except ImportError:
  class fake_cython:
    # Stand-in used when Cython is unavailable: ``cython.compiled`` is then
    # False, so the cell classes below take their pure-Python,
    # lock-protected code paths.
    compiled = False
  globals()['cython'] = fake_cython
__all__ = [
'MetricAggregator',
'MetricCell',
'MetricCellFactory',
'DistributionResult',
'GaugeResult'
]
class MetricCell(object):
  """For internal use only; no backwards-compatibility guarantees.
  Accumulates in-memory changes to a metric.
  A MetricCell represents a specific metric in a single context and bundle.
  All subclasses must be thread safe, as these are used in the pipeline runners,
  and may be subject to parallel/concurrent updates. Cells should only be used
  directly within a runner.
  """
  def __init__(self):
    # Guards mutation of the accumulated state in subclasses.
    self._lock = threading.Lock()
    # Lazily stamped on the first conversion to a monitoring info.
    self._start_time = None
  def update(self, value):
    # Record one new value; subclass hook.
    raise NotImplementedError
  def get_cumulative(self):
    # Return a snapshot of the accumulated state; subclass hook.
    raise NotImplementedError
  def to_runner_api_monitoring_info(self, name, transform_id):
    # Convert this cell to a MonitoringInfo proto. The start time is
    # recorded once, on the first call, and reused afterwards.
    if not self._start_time:
      self._start_time = datetime.utcnow()
    mi = self.to_runner_api_monitoring_info_impl(name, transform_id)
    mi.start_time.FromDatetime(self._start_time)
    return mi
  def to_runner_api_monitoring_info_impl(self, name, transform_id):
    # Metric-type-specific MonitoringInfo construction; subclass hook.
    raise NotImplementedError
  def reset(self):
    # type: () -> None
    raise NotImplementedError
  def __reduce__(self):
    # Cells hold a lock and are bundle-local; pickling is deliberately
    # unsupported.
    raise NotImplementedError
class MetricCellFactory(object):
  """For internal use only; no backwards-compatibility guarantees.
  Callable factory that produces a MetricCell; subclasses override
  ``__call__`` to construct the appropriate cell type.
  """
  def __call__(self):
    # type: () -> MetricCell
    raise NotImplementedError
class CounterCell(MetricCell):
  """For internal use only; no backwards-compatibility guarantees.
  Tracks the current value and delta of a counter metric.
  Each cell tracks the state of a metric independently per context per bundle.
  Therefore, each metric has a different cell in each bundle, cells are
  aggregated by the runner.
  This class is thread safe.
  """
  def __init__(self, *args):
    super(CounterCell, self).__init__(*args)
    # Running total; starts at the aggregator's identity (0).
    self.value = CounterAggregator.identity_element()
  def reset(self):
    # type: () -> None
    self.value = CounterAggregator.identity_element()
  def combine(self, other):
    # type: (CounterCell) -> CounterCell
    # Return a fresh cell holding the sum of both cells' totals.
    result = CounterCell()
    result.inc(self.value + other.value)
    return result
  def inc(self, n=1):
    # Increment the counter by n (defaults to 1).
    self.update(n)
  def dec(self, n=1):
    # Decrement the counter by n (defaults to 1).
    self.update(-n)
  def update(self, value):
    if cython.compiled:
      ivalue = value
      # Since We hold the GIL, no need for another lock.
      # And because the C threads won't preempt and interleave
      # each other.
      # Assuming there is no code trying to access the counters
      # directly by circumventing the GIL.
      self.value += ivalue
    else:
      with self._lock:
        self.value += value
  def get_cumulative(self):
    # type: () -> int
    with self._lock:
      return self.value
  def to_runner_api_monitoring_info_impl(self, name, transform_id):
    # Imported locally, presumably to avoid a circular import at module
    # load time -- confirm before moving to the top of the file.
    from apache_beam.metrics import monitoring_infos
    if not name.urn:
      # User counter case.
      return monitoring_infos.int64_user_counter(
          name.namespace,
          name.name,
          self.get_cumulative(),
          ptransform=transform_id)
    else:
      # Arbitrary URN case.
      return monitoring_infos.int64_counter(
          name.urn, self.get_cumulative(), labels=name.labels)
class DistributionCell(MetricCell):
  """For internal use only; no backwards-compatibility guarantees.
  Tracks the current value and delta for a distribution metric.
  Each cell tracks the state of a metric independently per context per bundle.
  Therefore, each metric has a different cell in each bundle, that is later
  aggregated.
  This class is thread safe.
  """
  def __init__(self, *args):
    super(DistributionCell, self).__init__(*args)
    # Accumulated sum/count/min/max; starts as the empty distribution.
    self.data = DistributionAggregator.identity_element()
  def reset(self):
    # type: () -> None
    self.data = DistributionAggregator.identity_element()
  def combine(self, other):
    # type: (DistributionCell) -> DistributionCell
    # Return a fresh cell holding the merged statistics of both cells.
    result = DistributionCell()
    result.data = self.data.combine(other.data)
    return result
  def update(self, value):
    if cython.compiled:
      # We will hold the GIL throughout the entire _update.
      self._update(value)
    else:
      with self._lock:
        self._update(value)
  def _update(self, value):
    # Caller must hold self._lock (or the GIL, in compiled mode).
    if cython.compiled:
      ivalue = value
    else:
      ivalue = int(value)
    self.data.count = self.data.count + 1
    self.data.sum = self.data.sum + ivalue
    if ivalue < self.data.min:
      self.data.min = ivalue
    if ivalue > self.data.max:
      self.data.max = ivalue
  def get_cumulative(self):
    # type: () -> DistributionData
    with self._lock:
      return self.data.get_cumulative()
  def to_runner_api_monitoring_info_impl(self, name, transform_id):
    # Imported locally, presumably to avoid a circular import at module
    # load time -- confirm before moving to the top of the file.
    from apache_beam.metrics import monitoring_infos
    return monitoring_infos.int64_user_distribution(
        name.namespace,
        name.name,
        self.get_cumulative(),
        ptransform=transform_id)
class GaugeCell(MetricCell):
  """For internal use only; no backwards-compatibility guarantees.
  Tracks the current value and delta for a gauge metric.
  Each cell tracks the state of a metric independently per context per bundle.
  Therefore, each metric has a different cell in each bundle, that is later
  aggregated.
  This class is thread safe.
  """
  def __init__(self, *args):
    super(GaugeCell, self).__init__(*args)
    # Latest (value, timestamp) pair; starts as the identity gauge.
    self.data = GaugeAggregator.identity_element()
  def reset(self):
    self.data = GaugeAggregator.identity_element()
  def combine(self, other):
    # type: (GaugeCell) -> GaugeCell
    # Return a fresh cell holding whichever value is newer.
    result = GaugeCell()
    result.data = self.data.combine(other.data)
    return result
  def set(self, value):
    # Public alias for update(): gauges are "set" rather than incremented.
    self.update(value)
  def update(self, value):
    # type: (SupportsInt) -> None
    value = int(value)
    with self._lock:
      # Set the value directly without checking timestamp, because
      # this value is naturally the latest value.
      self.data.value = value
      self.data.timestamp = time.time()
  def get_cumulative(self):
    # type: () -> GaugeData
    with self._lock:
      return self.data.get_cumulative()
  def to_runner_api_monitoring_info_impl(self, name, transform_id):
    # Imported locally, presumably to avoid a circular import at module
    # load time -- confirm before moving to the top of the file.
    from apache_beam.metrics import monitoring_infos
    return monitoring_infos.int64_user_gauge(
        name.namespace,
        name.name,
        self.get_cumulative(),
        ptransform=transform_id)
class DistributionResult(object):
  """The user-facing result of a Distribution metric.

  Wraps a DistributionData snapshot and exposes sum/count/min/max/mean.
  """
  def __init__(self, data):
    # type: (DistributionData) -> None
    self.data = data

  def __eq__(self, other):
    # type: (object) -> bool
    return isinstance(other, DistributionResult) and self.data == other.data

  def __hash__(self):
    # type: () -> int
    return hash(self.data)

  def __repr__(self):
    # type: () -> str
    return 'DistributionResult(sum={}, count={}, min={}, max={})'.format(
        self.sum, self.count, self.min, self.max)

  @property
  def max(self):
    # type: () -> Optional[int]
    # An empty distribution has no maximum.
    if not self.data.count:
      return None
    return self.data.max

  @property
  def min(self):
    # type: () -> Optional[int]
    # An empty distribution has no minimum.
    if not self.data.count:
      return None
    return self.data.min

  @property
  def count(self):
    # type: () -> Optional[int]
    return self.data.count

  @property
  def sum(self):
    # type: () -> Optional[int]
    return self.data.sum

  @property
  def mean(self):
    # type: () -> Optional[float]
    """Return the float mean of the distribution, or None if it is empty."""
    count = self.data.count
    if count == 0:
      return None
    return self.data.sum / count
class GaugeResult(object):
  """The user-facing result of a Gauge metric: latest value + timestamp."""
  def __init__(self, data):
    # type: (GaugeData) -> None
    self.data = data

  def __eq__(self, other):
    # type: (object) -> bool
    return isinstance(other, GaugeResult) and self.data == other.data

  def __hash__(self):
    # type: () -> int
    return hash(self.data)

  def __repr__(self):
    return '<GaugeResult(value={}, timestamp={})>'.format(
        self.value, self.timestamp)

  @property
  def value(self):
    # type: () -> Optional[int]
    return self.data.value

  @property
  def timestamp(self):
    # type: () -> Optional[int]
    return self.data.timestamp
class GaugeData(object):
  """For internal use only; no backwards-compatibility guarantees.

  Latest value and timestamp backing a gauge metric (integers only).

  Not thread safe: only the GaugeCell that owns it may mutate it.
  """
  def __init__(self, value, timestamp=None):
    # type: (Optional[int], Optional[int]) -> None
    self.value = value
    # A missing timestamp means "never set": epoch zero.
    self.timestamp = 0 if timestamp is None else timestamp

  def __eq__(self, other):
    # type: (object) -> bool
    return (
        isinstance(other, GaugeData) and self.value == other.value and
        self.timestamp == other.timestamp)

  def __hash__(self):
    # type: () -> int
    return hash((self.value, self.timestamp))

  def __repr__(self):
    # type: () -> str
    return '<GaugeData(value={}, timestamp={})>'.format(
        self.value, self.timestamp)

  def get_cumulative(self):
    # type: () -> GaugeData
    # Snapshot copy of the current state.
    return GaugeData(self.value, timestamp=self.timestamp)

  def combine(self, other):
    # type: (Optional[GaugeData]) -> GaugeData
    # The more recently set value wins; ties keep this instance.
    if other is not None and other.timestamp > self.timestamp:
      return other
    return self

  @staticmethod
  def singleton(value, timestamp=None):
    # type: (Optional[int], Optional[int]) -> GaugeData
    return GaugeData(value, timestamp=timestamp)
class DistributionData(object):
  """For internal use only; no backwards-compatibility guarantees.

  Accumulated sum/count/min/max of an integer distribution metric.

  Not thread safe: only the DistributionCell that owns it may mutate it.
  """
  def __init__(self, sum, count, min, max):
    # type: (int, int, int, int) -> None
    if not count:
      # Empty distribution: sentinel extrema so any real value replaces them.
      self.sum = self.count = 0
      self.min = 2**63 - 1
      # Avoid Wimplicitly-unsigned-literal caused by -2**63.
      self.max = -self.min - 1
    else:
      self.sum = sum
      self.count = count
      self.min = min
      self.max = max

  def __eq__(self, other):
    # type: (object) -> bool
    return (
        isinstance(other, DistributionData) and self.sum == other.sum and
        self.count == other.count and self.min == other.min and
        self.max == other.max)

  def __hash__(self):
    # type: () -> int
    return hash((self.sum, self.count, self.min, self.max))

  def __repr__(self):
    # type: () -> str
    return 'DistributionData(sum={}, count={}, min={}, max={})'.format(
        self.sum, self.count, self.min, self.max)

  def get_cumulative(self):
    # type: () -> DistributionData
    # Snapshot copy of the current state.
    return DistributionData(self.sum, self.count, self.min, self.max)

  def combine(self, other):
    # type: (Optional[DistributionData]) -> DistributionData
    if other is None:
      return self
    return DistributionData(
        self.sum + other.sum,
        self.count + other.count,
        min(self.min, other.min),
        max(self.max, other.max))

  @staticmethod
  def singleton(value):
    # type: (int) -> DistributionData
    # A distribution containing exactly one observation.
    return DistributionData(value, 1, value, value)
class MetricAggregator(object):
  """For internal use only; no backwards-compatibility guarantees.
  Base interface for aggregating metric data during pipeline execution."""
  def identity_element(self):
    # type: () -> Any
    """Returns the identical element of an Aggregation.
    For the identity element, it must hold that
    Aggregator.combine(any_element, identity_element) == any_element.
    """
    raise NotImplementedError
  def combine(self, x, y):
    # type: (Any, Any) -> Any
    # Merge two accumulated values into one; subclass hook.
    raise NotImplementedError
  def result(self, x):
    # type: (Any) -> Any
    # Convert an accumulated value into its user-facing form; subclass hook.
    raise NotImplementedError
class CounterAggregator(MetricAggregator):
  """For internal use only; no backwards-compatibility guarantees.

  Aggregates counter values (plain ``int``-like objects) across bundles.
  """
  @staticmethod
  def identity_element():
    # type: () -> int
    # Zero: adding it to any counter leaves the counter unchanged.
    return 0

  def combine(self, x, y):
    # type: (SupportsInt, SupportsInt) -> int
    total = int(x)
    total += int(y)
    return total

  def result(self, x):
    # type: (SupportsInt) -> int
    return int(x)
class DistributionAggregator(MetricAggregator):
  """For internal use only; no backwards-compatibility guarantees.
  Aggregator for Distribution metric data during pipeline execution.
  Values aggregated should be ``DistributionData`` objects.
  """
  @staticmethod
  def identity_element():
    # type: () -> DistributionData
    # The empty distribution: zero sum/count with int64 sentinel extrema.
    return DistributionData(0, 0, 2**63 - 1, -2**63)
  def combine(self, x, y):
    # type: (DistributionData, DistributionData) -> DistributionData
    return x.combine(y)
  def result(self, x):
    # type: (DistributionData) -> DistributionResult
    # Snapshot the mutable data into a user-facing result object.
    return DistributionResult(x.get_cumulative())
class GaugeAggregator(MetricAggregator):
  """For internal use only; no backwards-compatibility guarantees.

  Aggregates GaugeData values: GaugeData.combine keeps whichever value
  carries the larger timestamp.
  """
  @staticmethod
  def identity_element():
    # type: () -> GaugeData
    # Zero value at epoch zero: any real observation replaces it.
    return GaugeData(0, timestamp=0)

  def combine(self, x, y):
    # type: (GaugeData, GaugeData) -> GaugeData
    return x.combine(y)

  def result(self, x):
    # type: (GaugeData) -> GaugeResult
    # Snapshot the mutable data into a user-facing result object.
    return GaugeResult(x.get_cumulative())
| 27.249104 | 80 | 0.683328 |
4c6f9363c40c3da5065496f1927f25950b3c1f0b | 124 | py | Python | posts/admin.py | renzo-raizer/HorizonTour | 7b2fa1953d4ed32603280059f24afe447d058a73 | [
"Apache-2.0"
] | null | null | null | posts/admin.py | renzo-raizer/HorizonTour | 7b2fa1953d4ed32603280059f24afe447d058a73 | [
"Apache-2.0"
] | null | null | null | posts/admin.py | renzo-raizer/HorizonTour | 7b2fa1953d4ed32603280059f24afe447d058a73 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from .models import Postagens
# Register your models here.
admin.site.register(Postagens) | 20.666667 | 32 | 0.814516 |
da5a3c27d987adeed2fcbca042c2e5f09e7d0ffb | 3,236 | py | Python | profiles_project/settings.py | TanishqGupta11/profiles-rest-api | 60e6fff08844a5c34897f189eb9b76d475781180 | [
"MIT"
] | 1 | 2020-01-27T14:21:51.000Z | 2020-01-27T14:21:51.000Z | profiles_project/settings.py | TanishqGupta11/profiles-rest-api | 60e6fff08844a5c34897f189eb9b76d475781180 | [
"MIT"
] | 5 | 2020-06-06T01:24:34.000Z | 2022-02-10T12:55:38.000Z | profiles_project/settings.py | TanishqGupta11/profiles-rest-api | 60e6fff08844a5c34897f189eb9b76d475781180 | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '20ycwt&zw(@@mp^k+2o@m0%u@sfd)9_=e0lezd3x^0nde2mj50'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 25.68254 | 91 | 0.699938 |
079cedc6886584164dbd7eedb56f9fb1e5018d8b | 280 | py | Python | src/forms/search.py | anthony-chukwuemeka-nwachukwu/Movie-Recommender-System | 0684edd0c03289e949ef6481b291fc51a30856b5 | [
"MIT"
] | null | null | null | src/forms/search.py | anthony-chukwuemeka-nwachukwu/Movie-Recommender-System | 0684edd0c03289e949ef6481b291fc51a30856b5 | [
"MIT"
] | null | null | null | src/forms/search.py | anthony-chukwuemeka-nwachukwu/Movie-Recommender-System | 0684edd0c03289e949ef6481b291fc51a30856b5 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import SubmitField, SearchField, StringField
from wtforms.validators import DataRequired, Length
class SearchForm(FlaskForm):
    """Search form: one required query box (min 3 chars) and a submit button."""
    query = SearchField(validators=[DataRequired(), Length(min=3)])
    submit = SubmitField('Search')
| 31.111111 | 67 | 0.778571 |
e3f7a6705af325d6882b34f16a88d1df3b28e2bb | 9,350 | py | Python | app/buildhistory/models.py | Dhruv-Sachdev1313/macports-webapp | ed3d8efceac4cfb694a51241cf81023ed10aade6 | [
"BSD-2-Clause"
] | null | null | null | app/buildhistory/models.py | Dhruv-Sachdev1313/macports-webapp | ed3d8efceac4cfb694a51241cf81023ed10aade6 | [
"BSD-2-Clause"
] | null | null | null | app/buildhistory/models.py | Dhruv-Sachdev1313/macports-webapp | ed3d8efceac4cfb694a51241cf81023ed10aade6 | [
"BSD-2-Clause"
] | null | null | null | import urllib.request
import ssl
import json
import datetime
from django.db import models, transaction
from django.contrib.postgres.fields import JSONField
from config import BUILDERS_JSON_URL, BUILDBOT_URL_PREFIX, BUILDS_FETCHED_COUNT
from port.database import StringToArray
class BuilderModelManager(models.Manager):
    """Manager whose default queryset orders builders newest-version first."""
    def get_queryset(self):
        # StringToArray presumably splits the dotted "10.XX" display_name into
        # a sortable array so versions order numerically -- see port.database.
        return super(BuilderModelManager, self).get_queryset().annotate(version_array=StringToArray('display_name'),).order_by('-version_array')
class Builder(models.Model):
    """A buildbot builder, one per macOS version."""
    name = models.CharField(max_length=100, db_index=True, verbose_name="Name of the builder as per Buildbot")
    display_name = models.CharField(max_length=20, db_index=True, default='', verbose_name="Simplified builder name: 10.XX")
    natural_name = models.CharField(max_length=50, default='', verbose_name="Name of the MacOS version, e.g. Catalina")
    def __str__(self):
        return "%s" % self.name
    # Custom manager: default ordering is newest version first.
    objects = BuilderModelManager()
    class Meta:
        db_table = "builder"
        verbose_name = "Builder"
        verbose_name_plural = "Builders"
class BuildHistory(models.Model):
    """A single buildbot build of one port on one builder."""
    builder_name = models.ForeignKey(Builder, on_delete=models.CASCADE, related_name='builds')
    # Buildbot's build number; unique per builder, not globally.
    build_id = models.IntegerField()
    status = models.CharField(max_length=50)
    port_name = models.CharField(max_length=200)
    port_version = models.CharField(max_length=100, null=True)
    port_revision = models.CharField(max_length=50, null=True)
    time_start = models.DateTimeField()
    time_elapsed = models.DurationField(null=True)
    watcher_id = models.IntegerField()
    class Meta:
        db_table = "builds"
        verbose_name = "Build"
        verbose_name_plural = "Builds"
        indexes = [
            models.Index(fields=['port_name', 'builder_name', '-build_id']),
            models.Index(fields=['port_name', 'builder_name', '-time_start']),
            models.Index(fields=['port_name', 'status', 'builder_name']),
            models.Index(fields=['port_name', 'builder_name']),
            models.Index(fields=['port_name', 'status']),
            models.Index(fields=['-time_start']),
            models.Index(fields=['port_name']),
            models.Index(fields=['status']),
            models.Index(fields=['builder_name'])
        ]
    @classmethod
    def populate(cls):
        """Fetch recent builds from the buildbot (v1) JSON API for every
        known builder and store them, with their installed-file lists, in
        the database. Network failures for a URL yield empty data and are
        skipped silently."""
        url_prefix = BUILDBOT_URL_PREFIX
        # URL of the JSON description of one build.
        def get_url_json(builder_name, build_number):
            return '{}/json/builders/ports-{}-builder/builds/{}'.format(url_prefix, builder_name, build_number)
        # Human-facing URL of one build.
        def get_url_build(builder_name, build_number):
            return '{}/builders/ports-{}-builder/builds/{}'.format(url_prefix, builder_name, build_number)
        # URL of the plain-text "files installed" log for one build.
        def get_files_url(builder_name, build_number):
            return '{}/builders/ports-{}-builder/builds/{}/steps/install-port/logs/files/text'.format(url_prefix, builder_name, build_number)
        # Fetch and decode JSON; returns {} on any URL error.
        def get_data_from_url(url):
            # NOTE(review): a bare SSLContext() performs no certificate
            # verification -- confirm this is intentional for this host.
            gcontext = ssl.SSLContext()
            try:
                with urllib.request.urlopen(url, context=gcontext) as u:
                    data = json.loads(u.read().decode())
                    return data
            except urllib.error.URLError:
                return {}
        # Fetch a URL as an iterable of raw lines; returns [] on URL error.
        def get_text_from_url(url):
            gcontext = ssl.SSLContext()
            try:
                lines = urllib.request.urlopen(url, context=gcontext)
                return lines
            except urllib.error.URLError:
                return []
        # Convert buildbot's [key, value, ...] property rows into a dict.
        def get_build_properties(array):
            properties = {}
            for prop in array['properties']:
                properties[prop[0]] = prop[1]
            return properties
        # Reduce a raw build JSON object to the fields we persist.
        def return_summary(builder_name, build_number, build_data):
            data = {}
            properties = get_build_properties(build_data)
            port_name = properties['portname']
            status = ' '.join(build_data['text'])
            time_start = build_data['times'][0]
            time_build = float(build_data['times'][1]) - float(build_data['times'][0])
            data['name'] = port_name
            data['url'] = get_url_build(builder_name, build_number)
            # The watcher id is embedded in the trigger URL's 7th path part.
            data['watcher_id'] = properties['triggered_by'].split('/')[6]
            data['watcher_url'] = properties['triggered_by']
            data['status'] = status
            data['builder'] = builder_name
            data['buildnr'] = build_number
            data['time_start'] = str(datetime.datetime.fromtimestamp(int(float(time_start)), tz=datetime.timezone.utc))
            data['buildtime'] = str(
                datetime.timedelta(seconds=int(float(time_build)))) if time_build != -1 else None
            return data
        # Persist one summarized build and return the saved instance.
        def load_build_to_db(builder_obj, data):
            build = BuildHistory()
            build.port_name = data['name']
            build.status = data['status']
            build.build_id = data['buildnr']
            build.time_start = data['time_start']
            build.time_elapsed = data['buildtime']
            build.builder_name = builder_obj
            # NOTE(review): build_url and watcher_url are not declared model
            # fields, so these two assignments are never persisted -- confirm
            # whether the fields were dropped intentionally.
            build.build_url = data['url']
            build.watcher_url = data['watcher_url']
            build.watcher_id = data['watcher_id']
            build.save()
            return build
        # Store every installed-file line for a build in one transaction.
        @transaction.atomic()
        def load_files_to_db(build_obj, lines):
            for line in lines:
                decoded_line = line.decode("utf-8")
                file_obj = InstalledFile()
                file_obj.build = build_obj
                file_obj.file = decoded_line
                file_obj.save()
        for builder in Builder.objects.all():
            buildername = builder.name
            # fetch the last build first in order to figure out its number
            last_build_data = get_data_from_url(get_url_json(builder.name, -1))
            if not last_build_data:
                continue
            last_build_number = last_build_data['number']
            last_build_in_db = BuildHistory.objects.filter(builder_name_id=builder.id).order_by('-build_id').first()
            if last_build_in_db:
                # Resume right after the newest build we already have.
                build_in_database = last_build_in_db.build_id + 1
            else:
                # First run for this builder: backfill a fixed window.
                build_in_database = last_build_number - BUILDS_FETCHED_COUNT
            for build_number in range(build_in_database, last_build_number):
                build_data = get_data_from_url(get_url_json(buildername, build_number))
                installed_files = get_text_from_url(get_files_url(buildername, build_number))
                if not build_data:
                    break
                build_data_summary = return_summary(buildername, build_number, build_data)
                build_obj = load_build_to_db(builder, build_data_summary)
                load_files_to_db(build_obj, installed_files)
    @classmethod
    def buildbot2_parse(cls, build_object):
        """Parse one buildbot 2.x build dict and upsert it.
        Returns the saved BuildHistory, or None if the dict is missing any
        essential field."""
        properties_key = 'properties'
        # Buildbot 2 reports build state as a free-form string.
        def get_state():
            key = 'state_string'
            if build_object.get(key):
                return build_object[key]
            return "unknown"
        # Properties arrive as [value, source] pairs; take the value.
        def get_port_info(obj):
            port_version = obj.get('portversion', [None])[0]
            port_revision = obj.get('portrevision', [None])[0]
            return port_version, port_revision
        # Returns [start_time_str, elapsed_str_or_None].
        def get_build_times():
            time_start = str(datetime.datetime.fromtimestamp(int(float(build_object['started_at'])), tz=datetime.timezone.utc))
            time_build_seconds = None
            if build_object.get('complete') is True:
                time_build_seconds = float(build_object['complete_at']) - float(build_object['started_at'])
            time_build = None
            if time_build_seconds:
                time_build = str(datetime.timedelta(seconds=int(float(time_build_seconds))))
            return [time_start, time_build]
        # First try to get those fields which are essential for any build
        # If the process of getting these fields fails, abort
        try:
            build_id = build_object['buildid']
            builder_name = build_object[properties_key]['workername'][0]
            builder_name = builder_name.replace("ports-", "")
            port_name = build_object[properties_key]['portname'][0]
        except (KeyError, TypeError, IndexError, ValueError):
            # invalid build object, cannot proceed
            return None
        builder, builder_created = Builder.objects.get_or_create(name=builder_name)
        build, builder_created = BuildHistory.objects.get_or_create(
            builder_name_id=builder.id,
            build_id=build_id,
            port_name=port_name,
            time_start=get_build_times()[0],
            watcher_id=build_object.get('builderid', 0)
        )
        # Refresh the mutable fields on every call (status may change as
        # the build progresses).
        build.status = get_state()
        build.port_version, build.port_revision = get_port_info(build_object[properties_key])
        build.time_elapsed = get_build_times()[1]
        build.save()
        return build
class InstalledFile(models.Model):
    """One file installed by a build, taken from its install-port log."""
    build = models.ForeignKey('buildhistory.BuildHistory', on_delete=models.CASCADE, related_name='files')
    file = models.TextField()
    class Meta:
        db_table = "installed_files"
        verbose_name = "File"
        verbose_name_plural = "Files"
class TempBuildJSON(models.Model):
    """Raw build JSON payloads (presumably temporary staging; no usage
    is visible in this module -- confirm before removing)."""
    build_data = JSONField(default=dict)
| 39.787234 | 144 | 0.629412 |
3af1f467c433f1770e569b45e09608ce6ebfb1b4 | 1,536 | py | Python | src/models/train_model.py | davidelofrese/dvc-dga-notebook | 31ab723c892851a3f7f2e15b49c6cced6a466cc0 | [
"MIT"
] | null | null | null | src/models/train_model.py | davidelofrese/dvc-dga-notebook | 31ab723c892851a3f7f2e15b49c6cced6a466cc0 | [
"MIT"
] | null | null | null | src/models/train_model.py | davidelofrese/dvc-dga-notebook | 31ab723c892851a3f7f2e15b49c6cced6a466cc0 | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.ensemble import RandomForestClassifier
def train_model(all_domains, params):
    """Fit a random-forest classifier on the labelled domain data.

    Rows whose 'class' is 'weird' are excluded from training. The feature
    columns used are length, entropy, alexa_grams and word_grams.

    Args:
        all_domains: DataFrame with the feature columns and a 'class' column.
        params: dict providing 'n_estimators' and 'seed'.

    Returns:
        The fitted sklearn RandomForestClassifier.
    """
    training_rows = all_domains[all_domains['class'] != 'weird']
    feature_columns = ['length', 'entropy', 'alexa_grams', 'word_grams']
    # scikit-learn convention: X is the feature matrix, y the label vector.
    features = training_rows[feature_columns].values
    labels = np.array(training_rows['class'].tolist())
    # Random Forest: see
    # http://scikit-learn.org/dev/modules/generated/sklearn.ensemble.RandomForestClassifier.html
    classifier = RandomForestClassifier(
        n_estimators=params['n_estimators'], random_state=params['seed'])
    classifier.fit(features, labels)
    return classifier
if __name__ == '__main__':
    # CLI entry point: train a model from a pickled training set and write
    # the fitted classifier into <output_dir>.
    import argparse
    import pickle
    import yaml
    import os
    import pandas as pd
    parser = argparse.ArgumentParser('train_model.py')
    parser.add_argument('training_set', help='Training set')
    parser.add_argument('output_dir', help='Directory to save the trained model')
    args = parser.parse_args()
    # Hyper-parameters live in params.yaml two directories above this file.
    with open(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'params.yaml'), 'r') as pf:
        params = yaml.safe_load(pf)
    # Load the training set
    training_set = pd.read_pickle(args.training_set)
    clf = train_model(training_set, params['models'])
    # Save trained model
    os.makedirs(args.output_dir, exist_ok=True)
pickle.dump(clf, open(os.path.join(args.output_dir, 'trained_model.pkl'), 'wb')) | 34.133333 | 100 | 0.751953 |
8295ace12affd4b2e2eb7fe673ebbffaacbe563a | 5,446 | py | Python | functionaltests/api/v2/test_zone.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
] | null | null | null | functionaltests/api/v2/test_zone.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
] | null | null | null | functionaltests/api/v2/test_zone.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tempest_lib.exceptions import Conflict
from tempest_lib.exceptions import Forbidden
from tempest_lib.exceptions import NotFound
from functionaltests.common import datagen
from functionaltests.api.v2.base import DesignateV2Test
from functionaltests.api.v2.clients.zone_client import ZoneClient
from functionaltests.api.v2.clients.zone_import_client import ZoneImportClient
class ZoneTest(DesignateV2Test):
    """CRUD tests for the v2 zones API, exercised as the 'default' user."""

    def setUp(self):
        super(ZoneTest, self).setUp()
        self.increase_quotas(user='default')

    def _create_zone(self, zone_model, user='default'):
        # Create a zone and block until the API reports it ready.
        resp, zone = ZoneClient.as_user(user).post_zone(zone_model)
        self.assertEqual(resp.status, 202)
        ZoneClient.as_user(user).wait_for_zone(zone.id)
        return resp, zone

    def test_list_zones(self):
        self._create_zone(datagen.random_zone_data())
        resp, listing = ZoneClient.as_user('default').list_zones()
        self.assertEqual(resp.status, 200)
        self.assertGreater(len(listing.zones), 0)

    def test_create_zone(self):
        self._create_zone(datagen.random_zone_data(), user='default')

    def test_update_zone(self):
        post_model = datagen.random_zone_data()
        resp, created = self._create_zone(post_model)
        patch_model = datagen.random_zone_data()
        del patch_model.name  # never try to override the zone name
        resp, patched = ZoneClient.as_user('default').patch_zone(
            created.id, patch_model)
        self.assertEqual(resp.status, 202)
        ZoneClient.as_user('default').wait_for_zone(patched.id)
        resp, fetched = ZoneClient.as_user('default').get_zone(patched.id)
        self.assertEqual(resp.status, 200)
        # Identity and name are preserved; ttl/email take the patched values.
        self.assertEqual(patched.id, created.id)
        self.assertEqual(patched.name, created.name)
        self.assertEqual(patched.ttl, patch_model.ttl)
        self.assertEqual(patched.email, patch_model.email)

    def test_delete_zone(self):
        resp, zone = self._create_zone(datagen.random_zone_data())
        resp, deleted = ZoneClient.as_user('default').delete_zone(zone.id)
        self.assertEqual(resp.status, 202)
        ZoneClient.as_user('default').wait_for_zone_404(deleted.id)
class ZoneOwnershipTest(DesignateV2Test):
    """Checks that zone ownership boundaries are enforced across tenants."""

    def setUp(self):
        # BUG FIX: this method was named ``setup``, which unittest never
        # invokes (only the camelCase ``setUp`` hook runs), so the quota
        # increases below silently never happened.  It also called
        # ``super(ZoneTest, self).setUp()`` -- the wrong class.
        super(ZoneOwnershipTest, self).setUp()
        self.increase_quotas(user='default')
        self.increase_quotas(user='alt')

    def _create_zone(self, zone_model, user):
        """Create a zone as *user*, assert acceptance and wait for it."""
        resp, model = ZoneClient.as_user(user).post_zone(zone_model)
        self.assertEqual(resp.status, 202)
        ZoneClient.as_user(user).wait_for_zone(model.id)
        return resp, model

    def test_no_create_duplicate_domain(self):
        """A duplicate zone name is rejected for every tenant."""
        zone = datagen.random_zone_data()
        self._create_zone(zone, user='default')
        self.assertRaises(Conflict,
                          lambda: self._create_zone(zone, user='default'))
        self.assertRaises(Conflict,
                          lambda: self._create_zone(zone, user='alt'))

    def test_no_create_subdomain_by_alt_user(self):
        """Another tenant may not create subdomains of an owned zone."""
        zone = datagen.random_zone_data()
        subzone = datagen.random_zone_data(name='sub.' + zone.name)
        subsubzone = datagen.random_zone_data(name='sub.sub.' + zone.name)
        self._create_zone(zone, user='default')
        self.assertRaises(Forbidden,
                          lambda: self._create_zone(subzone, user='alt'))
        self.assertRaises(Forbidden,
                          lambda: self._create_zone(subsubzone, user='alt'))

    def test_no_create_superdomain_by_alt_user(self):
        """Another tenant may not create a superdomain of an owned zone."""
        superzone = datagen.random_zone_data()
        zone = datagen.random_zone_data(name="a.b." + superzone.name)
        self._create_zone(zone, user='default')
        self.assertRaises(Forbidden,
                          lambda: self._create_zone(superzone, user='alt'))
class ZoneImportTest(DesignateV2Test):
    """Exercises the zone import (zonefile upload) workflow."""

    def setUp(self):
        super(ZoneImportTest, self).setUp()

    def test_import_domain(self):
        """Import a zonefile, wait for completion, then delete the import."""
        user = 'default'
        import_client = ZoneImportClient.as_user(user)
        zone_client = ZoneClient.as_user(user)
        zonefile = datagen.random_zonefile_data()
        # The import is asynchronous: the POST is merely accepted (202)
        # and starts out PENDING.
        resp, model = import_client.post_zone_import(
            zonefile)
        import_id = model.id
        self.assertEqual(resp.status, 202)
        self.assertEqual(model.status, 'PENDING')
        import_client.wait_for_zone_import(import_id)
        # Re-fetch to observe the terminal COMPLETE state.
        resp, model = import_client.get_zone_import(
            model.id)
        self.assertEqual(resp.status, 200)
        self.assertEqual(model.status, 'COMPLETE')
        # Wait for the zone to become 'ACTIVE'
        zone_client.wait_for_zone(model.zone_id)
        resp, zone_model = zone_client.get_zone(model.zone_id)
        # Now make sure we can delete the zone_import
        import_client.delete_zone_import(import_id)
        # Once deleted, reading the import record must 404.
        self.assertRaises(NotFound,
                          lambda: import_client.get_zone_import(model.id))
| 38.083916 | 78 | 0.701249 |
05205c4dd31634dde8ae6cf12771a96787e98da0 | 6,434 | py | Python | various/iap_11_oop.py | Walber55/projects | 6c3de0797ffbc2dfcb5c4a1fa5a9ddd4580bbb1e | [
"MIT"
] | 15 | 2016-10-09T10:10:16.000Z | 2021-06-02T08:52:47.000Z | various/iap_11_oop.py | Walber55/projects | 6c3de0797ffbc2dfcb5c4a1fa5a9ddd4580bbb1e | [
"MIT"
] | null | null | null | various/iap_11_oop.py | Walber55/projects | 6c3de0797ffbc2dfcb5c4a1fa5a9ddd4580bbb1e | [
"MIT"
] | 9 | 2018-04-26T16:55:36.000Z | 2022-02-02T17:25:54.000Z | #!/usr/bin/env python2
# *-* coding: utf-8 *-*
"Interactive Python Part 11: OOP"
from __future__ import division
import turtle
from math import sqrt
class Point(object):
    "Point class for representing and manipulating cartesian coordinates."

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __str__(self):
        return str(self.x) + "," + str(self.y)

    def get_x(self):
        return self.x

    def get_y(self):
        return self.y

    def halfway(self, target):
        """Return a new Point at the midpoint between self and target."""
        mx = (self.x + target.x) / 2
        my = (self.y + target.y) / 2
        return Point(mx, my)

    def distance_to_origin(self):
        """Return the Euclidean distance from this point to (0, 0)."""
        return ((self.x ** 2) + (self.y ** 2)) ** 0.5

    def distance_to(self, point):
        """Return the Euclidean distance between this point and *point*."""
        dx = point.get_x() - self.x
        dy = point.get_y() - self.y
        return sqrt(dx ** 2 + dy ** 2)

    def reflect(self):
        """Return the reflection of this point about the x-axis as an
        (x, -y) tuple, e.g. Point(3, 5).reflect() == (3, -5).

        NOTE(review): the exercise text asked for a new Point; the original
        implementation returned a tuple and that behavior is preserved so
        existing callers are not broken.
        """
        return self.x, -self.y

    def slope_to_origin(self):
        """Return the slope of the line joining the origin to this point,
        or None when the point lies on the y-axis (slope undefined)."""
        if self.x:
            return self.y / self.x
        return None

    def move(self, units, dy=None):
        """Translate the point in place.

        Generalized from the original one-argument form (whose docstring
        already promised separate dx/dy): ``move(units)`` still shifts both
        coordinates by ``units``, while ``move(dx, dy)`` shifts each axis
        independently.
        """
        if dy is None:
            dy = units
        self.x += units
        self.y += dy
def center_circle(p1, p2, p3):
    """Given three points that fall on the circumference of a circle, find the
    center and radius of the circle.

    Prints (rather than returns) the center and radius, using the
    perpendicular-bisector construction on chords p1-p2 and p2-p3.

    NOTE(review): raises ZeroDivisionError when either chord is vertical
    (p1.x == p2.x or p2.x == p3.x), when chord p1-p2 is horizontal
    (mr == 0, used as 1/mr below), or when the points are collinear
    (mr == mt).  Python 2 only: uses ``print`` statements.
    """
    # Slopes of chord p1->p2 (mr) and chord p2->p3 (mt).
    mr = (p2.y - p1.y) / (p2.x - p1.x)
    mt = (p3.y - p2.y) / (p3.x - p2.x)
    # x-coordinate where the two perpendicular bisectors intersect.
    x = (mr*mt*(p3.y-p1.y)+mr*(p2.x+p3.x)-mt*(p1.x+p2.x))/(2*(mr-mt))
    # y follows from the bisector of chord p1-p2.
    y = -(1/mr)*(x-((p1.x+p2.x)/2))+(p1.y+p2.y)/2
    center = (x, y)
    radius = sqrt((p2.x-x)**2+(p2.y-y)**2)
    print "center is: ", center
    print "radius is: ", radius
class Fraction(object):
    """A simple rational number with an explicit numerator and denominator."""

    def __init__(self, top, bottom):
        self.num = top  # the numerator is on top
        self.den = bottom  # the denominator is on the bottom

    def __str__(self):
        return str(self.num) + "/" + str(self.den)

    def get_num(self):
        return self.num

    def get_den(self):
        return self.den

    def gcd(self, m, n):
        "Greatest Common Divisor (Euclid's algorithm)."
        while m % n:
            oldm = m
            oldn = n
            m = oldn
            n = oldm % oldn
        return n

    def simplify(self):
        """Reduce this fraction in place to lowest terms."""
        common = self.gcd(self.num, self.den)
        # BUG FIX: with ``from __future__ import division`` in effect,
        # ``/=`` is true division and silently turned the integer
        # components into floats (str() printed "3.0/4.0" not "3/4").
        # Floor division keeps them integral; common always divides both.
        self.num //= common
        self.den //= common

    def __add__(self, fract):
        """Return the sum of self and *fract*, reduced to lowest terms."""
        newnum = self.num * fract.den + self.den * fract.num
        newden = self.den * fract.den
        common = self.gcd(newnum, newden)
        # Floor division for the same float-drift reason as in simplify().
        return Fraction(newnum // common, newden // common)

    def __mul__(self, fract):
        """Return the product of self and *fract*.

        Note: the product is intentionally NOT reduced, matching the
        original behavior; call simplify() if lowest terms are needed.
        """
        newnum = self.num * fract.num
        newden = self.den * fract.den
        return Fraction(newnum, newden)

    __rmul__ = __mul__
class Rectangle(object):
    """An axis-aligned rectangle anchored at ``corner`` (its lower-left
    Point-like object) with the given ``width`` and ``height``."""

    def __init__(self, corner, width, height):
        self.corner = corner
        self.width = width
        self.height = height

    def __str__(self):
        return "%s %s" % (str(self.width), str(self.height))

    def get_width(self):
        return self.width

    def get_height(self):
        return self.height

    def area(self):
        """Return the area of this rectangle."""
        return self.width * self.height

    def perimeter(self):
        """Return the perimeter of this rectangle."""
        return 2 * (self.width + self.height)

    def transpose(self):
        """Swap the width and the height of this rectangle in place."""
        self.width, self.height = self.height, self.width

    def diagonal(self):
        """Return the length of the diagonal (corner to opposite corner)."""
        return sqrt(self.width ** 2 + self.height ** 2)

    def contains(self, point):
        """Return True if *point* falls within this rectangle treated as
        anchored at the origin, with half-open bounds [0, width) x
        [0, height) -- so a 10x5 rectangle does not contain (10, 2).

        NOTE: ``self.corner`` is deliberately ignored, per the exercise
        text; this preserves the original behavior.
        """
        x, y = point.get_x(), point.get_y()
        return 0 <= x < self.width and 0 <= y < self.height

    def collides_with(self, other=None):
        """Return True if this rectangle's bounding box overlaps *other*'s.

        Implements the TODO from the exercise.  Unlike contains(), this
        uses each rectangle's actual ``corner``.  Bounds are half-open, so
        rectangles that merely touch along an edge do NOT collide.

        For backward compatibility with the original stub (which took no
        argument and returned None), calling without *other* still
        returns None.
        """
        if other is None:
            return None
        ax, ay = self.corner.get_x(), self.corner.get_y()
        bx, by = other.corner.get_x(), other.corner.get_y()
        # Overlap on both axes <=> each rectangle starts before the other ends.
        return (ax < bx + other.width and bx < ax + self.width and
                ay < by + other.height and by < ay + self.height)
if __name__ == "__main__":
    # Ad-hoc demo / smoke tests for the classes above.  Python 2 only:
    # the bare ``print`` statements below are a syntax error on Python 3.
    # Points
    p = Point(5, 5)
    q = Point(6, -2)
    r = Point(2, -4)
    center_circle(p,q,r)
    # Fractions
    myfraction = Fraction(12, 16)
    print myfraction.get_num()
    print myfraction.get_den()
    print myfraction.gcd(4, 98)
    myfraction.simplify()
    f1 = Fraction(1,2)
    f2 = Fraction(1,4)
    # NOTE(review): f4 is computed but never used or printed.
    f4 = f1 * f2
    # Rectangle
    R = Rectangle(Point(4,5), 10, 5)
    assert R.area() == 50
    assert R.perimeter() == 30
    R.transpose()
    print R.diagonal()
    # NOTE(review): ``r`` below shadows the Point bound at the top.
    r = Rectangle(Point(0, 0), 10, 5)
    assert r.contains(Point(0, 0)) == True
    assert r.contains(Point(3, 3)) == True
    assert r.contains(Point(3, 7)) == False
    assert r.contains(Point(3, 5)) == False
    assert r.contains(Point(3, 4.99999)) == True
    assert r.contains(Point(-3, -3)) == False
| 29.925581 | 81 | 0.594809 |
b66faf75754cb87c8e7fbdf952efd4361a772746 | 28,798 | py | Python | flink-python/pyflink/common/execution_config.py | madfrog2047/flink | 973dbc02ca8656ef4849abecac1652bbb7932107 | [
"Apache-2.0"
] | 3 | 2019-10-09T01:48:20.000Z | 2019-10-09T01:53:15.000Z | flink-python/pyflink/common/execution_config.py | madfrog2047/flink | 973dbc02ca8656ef4849abecac1652bbb7932107 | [
"Apache-2.0"
] | 1 | 2019-06-07T13:04:18.000Z | 2019-06-07T13:04:18.000Z | flink-python/pyflink/common/execution_config.py | madfrog2047/flink | 973dbc02ca8656ef4849abecac1652bbb7932107 | [
"Apache-2.0"
] | 2 | 2016-07-29T06:53:02.000Z | 2016-09-09T12:55:02.000Z | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
from pyflink.common.execution_mode import ExecutionMode
from pyflink.common.input_dependency_constraint import InputDependencyConstraint
from pyflink.common.restart_strategy import RestartStrategies
from pyflink.java_gateway import get_gateway
from pyflink.util.utils import load_java_class
# Python 3 removed the built-in ``unicode`` type; alias it to ``str`` so the
# ``isinstance(..., (str, unicode))`` check in set_global_job_parameters works
# on both major versions.  BUG FIX: use version_info instead of the
# lexicographic string comparison ``sys.version >= '3'``, which breaks for a
# hypothetical two-digit major version and is fragile in general.
if sys.version_info[0] >= 3:
    unicode = str

__all__ = ['ExecutionConfig']
class ExecutionConfig(object):
"""
A config to define the behavior of the program execution. It allows to define (among other
options) the following settings:
- The default parallelism of the program, i.e., how many parallel tasks to use for
all functions that do not define a specific value directly.
- The number of retries in the case of failed executions.
- The delay between execution retries.
- The :class:`ExecutionMode` of the program: Batch or Pipelined.
The default execution mode is :data:`ExecutionMode.PIPELINED`
- Enabling or disabling the "closure cleaner". The closure cleaner pre-processes
the implementations of functions. In case they are (anonymous) inner classes,
it removes unused references to the enclosing class to fix certain serialization-related
problems and to reduce the size of the closure.
- The config allows to register types and serializers to increase the efficiency of
handling *generic types* and *POJOs*. This is usually only needed
when the functions return not only the types declared in their signature, but
also subclasses of those types.
:data:`PARALLELISM_DEFAULT`:
The flag value indicating use of the default parallelism. This value can
be used to reset the parallelism back to the default state.
:data:`PARALLELISM_UNKNOWN`:
The flag value indicating an unknown or unset parallelism. This value is
not a valid parallelism and indicates that the parallelism should remain
unchanged.
"""
PARALLELISM_DEFAULT = -1
PARALLELISM_UNKNOWN = -2
def __init__(self, j_execution_config):
self._j_execution_config = j_execution_config
def enable_closure_cleaner(self):
"""
Enables the ClosureCleaner. This analyzes user code functions and sets fields to null
that are not used. This will in most cases make closures or anonymous inner classes
serializable that where not serializable due to some Scala or Java implementation artifact.
User code must be serializable because it needs to be sent to worker nodes.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.enableClosureCleaner()
return self
def disable_closure_cleaner(self):
"""
Disables the ClosureCleaner.
.. seealso:: :func:`enable_closure_cleaner`
:return: This object.
"""
self._j_execution_config = self._j_execution_config.disableClosureCleaner()
return self
def is_closure_cleaner_enabled(self):
"""
Returns whether the ClosureCleaner is enabled.
.. seealso:: :func:`enable_closure_cleaner`
:return: ``True`` means enable and ``False`` means disable.
"""
return self._j_execution_config.isClosureCleanerEnabled()
def set_auto_watermark_interval(self, interval):
"""
Sets the interval of the automatic watermark emission. Watermarks are used throughout
the streaming system to keep track of the progress of time. They are used, for example,
for time based windowing.
:param interval: The integer value interval between watermarks in milliseconds.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setAutoWatermarkInterval(interval)
return self
def get_auto_watermark_interval(self):
"""
Returns the interval of the automatic watermark emission.
.. seealso:: :func:`set_auto_watermark_interval`
:return: The integer value interval in milliseconds of the automatic watermark emission.
"""
return self._j_execution_config.getAutoWatermarkInterval()
def set_latency_tracking_interval(self, interval):
"""
Interval for sending latency tracking marks from the sources to the sinks.
Flink will send latency tracking marks from the sources at the specified interval.
Setting a tracking interval <= 0 disables the latency tracking.
:param interval: Integer value interval in milliseconds.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setLatencyTrackingInterval(interval)
return self
def get_latency_tracking_interval(self):
"""
Returns the latency tracking interval.
:return: The latency tracking interval in milliseconds.
"""
return self._j_execution_config.getLatencyTrackingInterval()
def get_parallelism(self):
"""
Gets the parallelism with which operation are executed by default. Operations can
individually override this value to use a specific parallelism.
Other operations may need to run with a different parallelism - for example calling
a reduce operation over the entire data set will involve an operation that runs
with a parallelism of one (the final reduce to the single result value).
:return: The parallelism used by operations, unless they override that value. This method
returns :data:`ExecutionConfig.PARALLELISM_DEFAULT` if the environment's default
parallelism should be used.
"""
return self._j_execution_config.getParallelism()
def set_parallelism(self, parallelism):
"""
Sets the parallelism for operations executed through this environment.
Setting a parallelism of x here will cause all operators (such as join, map, reduce) to run
with x parallel instances.
This method overrides the default parallelism for this environment.
The local execution environment uses by default a value equal to the number of hardware
contexts (CPU cores / threads). When executing the program via the command line client
from a JAR/Python file, the default parallelism is the one configured for that setup.
:param parallelism: The parallelism to use.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setParallelism(parallelism)
return self
def get_max_parallelism(self):
"""
Gets the maximum degree of parallelism defined for the program.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:return: Maximum degree of parallelism.
"""
return self._j_execution_config.getMaxParallelism()
def set_max_parallelism(self, max_parallelism):
"""
Sets the maximum degree of parallelism defined for the program.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:param max_parallelism: Maximum degree of parallelism to be used for the program.
"""
self._j_execution_config.setMaxParallelism(max_parallelism)
def get_task_cancellation_interval(self):
"""
Gets the interval (in milliseconds) between consecutive attempts to cancel a running task.
:return: The integer value interval in milliseconds.
"""
return self._j_execution_config.getTaskCancellationInterval()
def set_task_cancellation_interval(self, interval):
"""
Sets the configuration parameter specifying the interval (in milliseconds)
between consecutive attempts to cancel a running task.
:param interval: The integer value interval in milliseconds.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setTaskCancellationInterval(interval)
return self
def get_task_cancellation_timeout(self):
"""
Returns the timeout (in milliseconds) after which an ongoing task
cancellation leads to a fatal TaskManager error.
The value ``0`` means that the timeout is disabled. In
this case a stuck cancellation will not lead to a fatal error.
:return: The timeout in milliseconds.
"""
return self._j_execution_config.getTaskCancellationTimeout()
def set_task_cancellation_timeout(self, timeout):
"""
Sets the timeout (in milliseconds) after which an ongoing task cancellation
is considered failed, leading to a fatal TaskManager error.
The cluster default is configured via ``TaskManagerOptions#TASK_CANCELLATION_TIMEOUT``.
The value ``0`` disables the timeout. In this case a stuck
cancellation will not lead to a fatal error.
:param timeout: The task cancellation timeout (in milliseconds).
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setTaskCancellationTimeout(timeout)
return self
def set_restart_strategy(self, restart_strategy_configuration):
"""
Sets the restart strategy to be used for recovery.
::
>>> config = env.get_config()
>>> config.set_restart_strategy(RestartStrategies.fixed_delay_restart(10, 1000))
The restart strategy configurations are all created from :class:`RestartStrategies`.
:param restart_strategy_configuration: Configuration defining the restart strategy to use.
"""
self._j_execution_config.setRestartStrategy(
restart_strategy_configuration._j_restart_strategy_configuration)
def get_restart_strategy(self):
"""
Returns the restart strategy which has been set for the current job.
.. seealso:: :func:`set_restart_strategy`
:return: The specified restart configuration.
"""
return RestartStrategies._from_j_restart_strategy(
self._j_execution_config.getRestartStrategy())
def set_execution_mode(self, execution_mode):
"""
Sets the execution mode to execute the program. The execution mode defines whether
data exchanges are performed in a batch or on a pipelined manner.
The default execution mode is :data:`ExecutionMode.PIPELINED`.
Example:
::
>>> config.set_execution_mode(ExecutionMode.BATCH)
:param execution_mode: The execution mode to use. The execution mode could be
:data:`ExecutionMode.PIPELINED`,
:data:`ExecutionMode.PIPELINED_FORCED`,
:data:`ExecutionMode.BATCH` or
:data:`ExecutionMode.BATCH_FORCED`.
"""
self._j_execution_config.setExecutionMode(
ExecutionMode._to_j_execution_mode(execution_mode))
def get_execution_mode(self):
"""
Gets the execution mode used to execute the program. The execution mode defines whether
data exchanges are performed in a batch or on a pipelined manner.
The default execution mode is :data:`ExecutionMode.PIPELINED`.
.. seealso:: :func:`set_execution_mode`
:return: The execution mode for the program.
"""
j_execution_mode = self._j_execution_config.getExecutionMode()
return ExecutionMode._from_j_execution_mode(j_execution_mode)
def set_default_input_dependency_constraint(self, input_dependency_constraint):
"""
Sets the default input dependency constraint for vertex scheduling. It indicates when a
task should be scheduled considering its inputs status.
The default constraint is :data:`InputDependencyConstraint.ANY`.
Example:
::
>>> config.set_default_input_dependency_constraint(InputDependencyConstraint.ALL)
:param input_dependency_constraint: The input dependency constraint. The constraints could
be :data:`InputDependencyConstraint.ANY` or
:data:`InputDependencyConstraint.ALL`.
"""
self._j_execution_config.setDefaultInputDependencyConstraint(
InputDependencyConstraint._to_j_input_dependency_constraint(
input_dependency_constraint))
def get_default_input_dependency_constraint(self):
"""
Gets the default input dependency constraint for vertex scheduling. It indicates when a
task should be scheduled considering its inputs status.
The default constraint is :data:`InputDependencyConstraint.ANY`.
.. seealso:: :func:`set_default_input_dependency_constraint`
:return: The input dependency constraint of this job. The possible constraints are
:data:`InputDependencyConstraint.ANY` and :data:`InputDependencyConstraint.ALL`.
"""
j_input_dependency_constraint = self._j_execution_config\
.getDefaultInputDependencyConstraint()
return InputDependencyConstraint._from_j_input_dependency_constraint(
j_input_dependency_constraint)
def enable_force_kryo(self):
"""
Force TypeExtractor to use Kryo serializer for POJOS even though we could analyze as POJO.
In some cases this might be preferable. For example, when using interfaces
with subclasses that cannot be analyzed as POJO.
"""
self._j_execution_config.enableForceKryo()
def disable_force_kryo(self):
"""
Disable use of Kryo serializer for all POJOs.
"""
self._j_execution_config.disableForceKryo()
def is_force_kryo_enabled(self):
"""
:return: Boolean value that represent whether the usage of Kryo serializer for all POJOs
is enabled.
"""
return self._j_execution_config.isForceKryoEnabled()
def enable_generic_types(self):
"""
Enables the use generic types which are serialized via Kryo.
Generic types are enabled by default.
.. seealso:: :func:`disable_generic_types`
"""
self._j_execution_config.enableGenericTypes()
def disable_generic_types(self):
"""
Disables the use of generic types (types that would be serialized via Kryo). If this option
is used, Flink will throw an ``UnsupportedOperationException`` whenever it encounters
a data type that would go through Kryo for serialization.
Disabling generic types can be helpful to eagerly find and eliminate the use of types
that would go through Kryo serialization during runtime. Rather than checking types
individually, using this option will throw exceptions eagerly in the places where generic
types are used.
**Important:** We recommend to use this option only during development and pre-production
phases, not during actual production use. The application program and/or the input data may
be such that new, previously unseen, types occur at some point. In that case, setting this
option would cause the program to fail.
.. seealso:: :func:`enable_generic_types`
"""
self._j_execution_config.disableGenericTypes()
def has_generic_types_disabled(self):
"""
Checks whether generic types are supported. Generic types are types that go through Kryo
during serialization.
Generic types are enabled by default.
.. seealso:: :func:`enable_generic_types`
.. seealso:: :func:`disable_generic_types`
:return: Boolean value that represent whether the generic types are supported.
"""
return self._j_execution_config.hasGenericTypesDisabled()
def enable_auto_generated_uids(self):
"""
Enables the Flink runtime to auto-generate UID's for operators.
.. seealso:: :func:`disable_auto_generated_uids`
"""
self._j_execution_config.enableAutoGeneratedUIDs()
def disable_auto_generated_uids(self):
"""
Disables auto-generated UIDs. Forces users to manually specify UIDs
on DataStream applications.
It is highly recommended that users specify UIDs before deploying to
production since they are used to match state in savepoints to operators
in a job. Because auto-generated ID's are likely to change when modifying
a job, specifying custom IDs allow an application to evolve overtime
without discarding state.
"""
self._j_execution_config.disableAutoGeneratedUIDs()
def has_auto_generated_uids_enabled(self):
"""
Checks whether auto generated UIDs are supported.
Auto generated UIDs are enabled by default.
.. seealso:: :func:`enable_auto_generated_uids`
.. seealso:: :func:`disable_auto_generated_uids`
:return: Boolean value that represent whether auto generated UIDs are supported.
"""
return self._j_execution_config.hasAutoGeneratedUIDsEnabled()
def enable_force_avro(self):
"""
Forces Flink to use the Apache Avro serializer for POJOs.
**Important:** Make sure to include the *flink-avro* module.
"""
self._j_execution_config.enableForceAvro()
def disable_force_avro(self):
"""
Disables the Apache Avro serializer as the forced serializer for POJOs.
"""
self._j_execution_config.disableForceAvro()
def is_force_avro_enabled(self):
"""
Returns whether the Apache Avro is the default serializer for POJOs.
:return: Boolean value that represent whether the Apache Avro is the default serializer
for POJOs.
"""
return self._j_execution_config.isForceAvroEnabled()
def enable_object_reuse(self):
"""
Enables reusing objects that Flink internally uses for deserialization and passing
data to user-code functions. Keep in mind that this can lead to bugs when the
user-code function of an operation is not aware of this behaviour.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.enableObjectReuse()
return self
def disable_object_reuse(self):
"""
Disables reusing objects that Flink internally uses for deserialization and passing
data to user-code functions.
.. seealso:: :func:`enable_object_reuse`
:return: This object.
"""
self._j_execution_config = self._j_execution_config.disableObjectReuse()
return self
def is_object_reuse_enabled(self):
"""
Returns whether object reuse has been enabled or disabled.
.. seealso:: :func:`enable_object_reuse`
:return: Boolean value that represent whether object reuse has been enabled or disabled.
"""
return self._j_execution_config.isObjectReuseEnabled()
def enable_sysout_logging(self):
"""
Enables the printing of progress update messages to stdout.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.enableSysoutLogging()
return self
def disable_sysout_logging(self):
"""
Disables the printing of progress update messages to stdout.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.disableSysoutLogging()
return self
def is_sysout_logging_enabled(self):
"""
Gets whether progress update messages should be printed to stdout.
:return: True, if progress update messages should be printed, false otherwise.
"""
return self._j_execution_config.isSysoutLoggingEnabled()
def get_global_job_parameters(self):
"""
Gets current configuration dict.
:return: The configuration dict.
"""
return dict(self._j_execution_config.getGlobalJobParameters().toMap())
def set_global_job_parameters(self, global_job_parameters_dict):
"""
Register a custom, serializable user configuration dict.
Example:
::
>>> config.set_global_job_parameters({"environment.checkpoint_interval": "1000"})
:param global_job_parameters_dict: Custom user configuration dict.
"""
gateway = get_gateway()
Configuration = gateway.jvm.org.apache.flink.configuration.Configuration
j_global_job_parameters = Configuration()
for key in global_job_parameters_dict:
if not isinstance(global_job_parameters_dict[key], (str, unicode)):
value = str(global_job_parameters_dict[key])
else:
value = global_job_parameters_dict[key]
j_global_job_parameters.setString(key, value)
self._j_execution_config.setGlobalJobParameters(j_global_job_parameters)
def add_default_kryo_serializer(self, type_class_name, serializer_class_name):
"""
Adds a new Kryo default serializer to the Runtime.
Example:
::
>>> config.add_default_kryo_serializer("com.aaa.bbb.PojoClass",
... "com.aaa.bbb.Serializer")
:param type_class_name: The full-qualified java class name of the types serialized with the
given serializer.
:param serializer_class_name: The full-qualified java class name of the serializer to use.
"""
type_clz = load_java_class(type_class_name)
j_serializer_clz = load_java_class(serializer_class_name)
self._j_execution_config.addDefaultKryoSerializer(type_clz, j_serializer_clz)
def register_type_with_kryo_serializer(self, type_class_name, serializer_class_name):
"""
Registers the given Serializer via its class as a serializer for the given type at the
KryoSerializer.
Example:
::
>>> config.register_type_with_kryo_serializer("com.aaa.bbb.PojoClass",
... "com.aaa.bbb.Serializer")
:param type_class_name: The full-qualified java class name of the types serialized with
the given serializer.
:param serializer_class_name: The full-qualified java class name of the serializer to use.
"""
type_clz = load_java_class(type_class_name)
j_serializer_clz = load_java_class(serializer_class_name)
self._j_execution_config.registerTypeWithKryoSerializer(type_clz, j_serializer_clz)
def register_pojo_type(self, type_class_name):
"""
Registers the given type with the serialization stack. If the type is eventually
serialized as a POJO, then the type is registered with the POJO serializer. If the
type ends up being serialized with Kryo, then it will be registered at Kryo to make
sure that only tags are written.
Example:
::
>>> config.register_pojo_type("com.aaa.bbb.PojoClass")
:param type_class_name: The full-qualified java class name of the type to register.
"""
type_clz = load_java_class(type_class_name)
self._j_execution_config.registerPojoType(type_clz)
def register_kryo_type(self, type_class_name):
"""
Registers the given type with the serialization stack. If the type is eventually
serialized as a POJO, then the type is registered with the POJO serializer. If the
type ends up being serialized with Kryo, then it will be registered at Kryo to make
sure that only tags are written.
Example:
::
>>> config.register_kryo_type("com.aaa.bbb.KryoClass")
:param type_class_name: The full-qualified java class name of the type to register.
"""
type_clz = load_java_class(type_class_name)
self._j_execution_config.registerKryoType(type_clz)
def get_registered_types_with_kryo_serializer_classes(self):
"""
Returns the registered types with their Kryo Serializer classes.
:return: The dict which the keys are full-qualified java class names of the registered
types and the values are full-qualified java class names of the Kryo Serializer
classes.
"""
j_clz_map = self._j_execution_config.getRegisteredTypesWithKryoSerializerClasses()
registered_serializers = {}
for key in j_clz_map:
registered_serializers[key.getName()] = j_clz_map[key].getName()
return registered_serializers
def get_default_kryo_serializer_classes(self):
"""
Returns the registered default Kryo Serializer classes.
:return: The dict which the keys are full-qualified java class names of the registered
types and the values are full-qualified java class names of the Kryo default
Serializer classes.
"""
j_clz_map = self._j_execution_config.getDefaultKryoSerializerClasses()
default_kryo_serializers = {}
for key in j_clz_map:
default_kryo_serializers[key.getName()] = j_clz_map[key].getName()
return default_kryo_serializers
def get_registered_kryo_types(self):
"""
Returns the registered Kryo types.
:return: The list of full-qualified java class names of the registered Kryo types.
"""
j_clz_set = self._j_execution_config.getRegisteredKryoTypes()
return [value.getName() for value in j_clz_set]
def get_registered_pojo_types(self):
"""
Returns the registered POJO types.
:return: The list of full-qualified java class names of the registered POJO types.
"""
j_clz_set = self._j_execution_config.getRegisteredPojoTypes()
return [value.getName() for value in j_clz_set]
def is_auto_type_registration_disabled(self):
"""
Returns whether Flink is automatically registering all types in the user programs with
Kryo.
:return: ``True`` means auto type registration is disabled and ``False`` means enabled.
"""
return self._j_execution_config.isAutoTypeRegistrationDisabled()
def disable_auto_type_registration(self):
"""
Control whether Flink is automatically registering all types in the user programs with
Kryo.
"""
self._j_execution_config.disableAutoTypeRegistration()
def is_use_snapshot_compression(self):
"""
Returns whether he compression (snappy) for keyed state in full checkpoints and savepoints
is enabled.
:return: ``True`` means enabled and ``False`` means disabled.
"""
return self._j_execution_config.isUseSnapshotCompression()
def set_use_snapshot_compression(self, use_snapshot_compression):
"""
Control whether the compression (snappy) for keyed state in full checkpoints and savepoints
is enabled.
:param use_snapshot_compression: ``True`` means enabled and ``False`` means disabled.
"""
self._j_execution_config.setUseSnapshotCompression(use_snapshot_compression)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
self._j_execution_config == other._j_execution_config
    def __hash__(self):
        # Delegate to the wrapped Java object's hashCode() so hashing stays
        # consistent with __eq__, which compares the same wrapped object.
        return self._j_execution_config.hashCode()
| 39.941748 | 99 | 0.682061 |
91b2d800b8a43d4b0f5731accc1bf8521020eecf | 1,237 | py | Python | sqlparse/engine/filter_stack.py | medvykes/djang | 5b0619e0de0a80cae1a670726b9a8e68609420f5 | [
"MIT"
] | null | null | null | sqlparse/engine/filter_stack.py | medvykes/djang | 5b0619e0de0a80cae1a670726b9a8e68609420f5 | [
"MIT"
] | null | null | null | sqlparse/engine/filter_stack.py | medvykes/djang | 5b0619e0de0a80cae1a670726b9a8e68609420f5 | [
"MIT"
] | null | null | null | #
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
"""filter"""
from sqlparse import lexer
from sqlparse.engine import grouping
from sqlparse.engine.statement_splitter import StatementSplitter
class FilterStack:
    """Pipeline that tokenizes SQL and runs filters over the statements."""

    def __init__(self):
        self.preprocess = []
        self.stmtprocess = []
        self.postprocess = []
        self._grouping = False

    def enable_grouping(self):
        """Turn on token grouping for each parsed statement."""
        self._grouping = True

    def run(self, sql, encoding=None):
        """Tokenize *sql* and lazily yield the processed statements."""
        token_stream = lexer.tokenize(sql, encoding)

        # Pre-filters operate on the raw token stream.
        for pre_filter in self.preprocess:
            token_stream = pre_filter.process(token_stream)

        statements = StatementSplitter().process(token_stream)

        # Statement-level filters mutate each statement in place; post-filters
        # may replace it entirely.
        for statement in statements:
            if self._grouping:
                statement = grouping.group(statement)

            for stmt_filter in self.stmtprocess:
                stmt_filter.process(statement)

            for post_filter in self.postprocess:
                statement = post_filter.process(statement)

            yield statement
| 27.488889 | 65 | 0.616006 |
ab5d16560d72c7c24bef5e6279fe0a511bd2795a | 4,841 | py | Python | networks/FlowNetC.py | tomrunia/flownet2-pytorch | 759b09c375348cf64f52f914cf3bf3e9095cc959 | [
"Apache-2.0"
] | null | null | null | networks/FlowNetC.py | tomrunia/flownet2-pytorch | 759b09c375348cf64f52f914cf3bf3e9095cc959 | [
"Apache-2.0"
] | null | null | null | networks/FlowNetC.py | tomrunia/flownet2-pytorch | 759b09c375348cf64f52f914cf3bf3e9095cc959 | [
"Apache-2.0"
] | null | null | null | import warnings
warnings.filterwarnings("ignore")
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
import math
import numpy as np
from .correlation_package.correlation import Correlation
from .submodules import *
'Parameter count , 39,175,298 '
class FlowNetC(nn.Module):
    """FlowNet 'Correlation' variant.

    Two shared-weight convolutional streams process the two input frames; the
    conv3 outputs are merged by a correlation layer (plus a 1x1 "redirect"
    conv of the first stream), then a contracting conv stack and an expanding
    deconv stack predict optical flow at five scales (flow6 .. flow2).
    """
    def __init__(self,args, batchNorm=True, div_flow = 20):
        """
        :param args: namespace read only for args.fp16 (wrap correlation in
                     fp32 casts when training in half precision)
        :param batchNorm: whether the conv helpers use batch normalization
        :param div_flow: stored scale factor
        """
        super(FlowNetC,self).__init__()
        self.batchNorm = batchNorm
        self.div_flow = div_flow
        # Shared feature extractor applied to both frames.
        self.conv1 = conv(self.batchNorm, 3, 64, kernel_size=7, stride=2)
        self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
        self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
        self.conv_redir = conv(self.batchNorm, 256, 32, kernel_size=1, stride=1)
        # Correlation runs in fp32; cast around it when the model is fp16.
        if args.fp16:
            self.corr = nn.Sequential(
                tofp32(),
                Correlation(pad_size=20, kernel_size=1, max_displacement=20, stride1=1, stride2=2, corr_multiply=1),
                tofp16())
        else:
            self.corr = Correlation(pad_size=20, kernel_size=1, max_displacement=20, stride1=1, stride2=2, corr_multiply=1)
        self.corr_activation = nn.LeakyReLU(0.1,inplace=True)
        # 473 = 441 correlation channels + 32 redirect channels.
        self.conv3_1 = conv(self.batchNorm, 473, 256)
        self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
        self.conv4_1 = conv(self.batchNorm, 512, 512)
        self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
        self.conv5_1 = conv(self.batchNorm, 512, 512)
        self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
        self.conv6_1 = conv(self.batchNorm,1024, 1024)
        # Expanding part: deconv inputs are conv features + deconv + 2 flow chans.
        self.deconv5 = deconv(1024,512)
        self.deconv4 = deconv(1026,256)
        self.deconv3 = deconv(770,128)
        self.deconv2 = deconv(386,64)
        self.predict_flow6 = predict_flow(1024)
        self.predict_flow5 = predict_flow(1026)
        self.predict_flow4 = predict_flow(770)
        self.predict_flow3 = predict_flow(386)
        self.predict_flow2 = predict_flow(194)
        self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
        self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
        self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
        self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
        # Xavier init for all (de)conv weights, uniform init for biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
            if isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
                # init_deconv_bilinear(m.weight)
        # NOTE(review): div_flow and upsample1 are stored but never used in
        # forward() here — presumably consumed by a wrapper network; confirm.
        self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
    def forward(self, x):
        """x is the two RGB frames stacked along the channel axis (6 chans);
        returns (flow2..flow6) during training, else a 1-tuple (flow2,)."""
        x1 = x[:,0:3,:,:]
        x2 = x[:,3::,:,:]
        out_conv1a = self.conv1(x1)
        out_conv2a = self.conv2(out_conv1a)
        out_conv3a = self.conv3(out_conv2a)
        # FlownetC bottom input stream
        out_conv1b = self.conv1(x2)
        out_conv2b = self.conv2(out_conv1b)
        out_conv3b = self.conv3(out_conv2b)
        # Merge streams
        out_corr = self.corr(out_conv3a, out_conv3b) # False
        out_corr = self.corr_activation(out_corr)
        # Redirect top input stream and concatenate
        out_conv_redir = self.conv_redir(out_conv3a)
        in_conv3_1 = torch.cat((out_conv_redir, out_corr), 1)
        # Merged conv layers
        out_conv3_1 = self.conv3_1(in_conv3_1)
        out_conv4 = self.conv4_1(self.conv4(out_conv3_1))
        out_conv5 = self.conv5_1(self.conv5(out_conv4))
        out_conv6 = self.conv6_1(self.conv6(out_conv5))
        flow6 = self.predict_flow6(out_conv6)
        flow6_up = self.upsampled_flow6_to_5(flow6)
        out_deconv5 = self.deconv5(out_conv6)
        concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
        flow5 = self.predict_flow5(concat5)
        flow5_up = self.upsampled_flow5_to_4(flow5)
        out_deconv4 = self.deconv4(concat5)
        concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
        flow4 = self.predict_flow4(concat4)
        flow4_up = self.upsampled_flow4_to_3(flow4)
        out_deconv3 = self.deconv3(concat4)
        concat3 = torch.cat((out_conv3_1,out_deconv3,flow4_up),1)
        flow3 = self.predict_flow3(concat3)
        flow3_up = self.upsampled_flow3_to_2(flow3)
        out_deconv2 = self.deconv2(concat3)
        concat2 = torch.cat((out_conv2a,out_deconv2,flow3_up),1)
        flow2 = self.predict_flow2(concat2)
        if self.training:
            return flow2,flow3,flow4,flow5,flow6
        else:
            return flow2,
| 36.126866 | 123 | 0.628383 |
a4bc319c07aaaf0b634b4613dafc66092466d97b | 3,345 | py | Python | robonaldo/core.py | xtrm-en/robonaldo | 91601bad88043effbb717e40467526fe11bd4cb5 | [
"0BSD"
] | null | null | null | robonaldo/core.py | xtrm-en/robonaldo | 91601bad88043effbb717e40467526fe11bd4cb5 | [
"0BSD"
] | null | null | null | robonaldo/core.py | xtrm-en/robonaldo | 91601bad88043effbb717e40467526fe11bd4cb5 | [
"0BSD"
] | null | null | null | from argparse import ArgumentParser
from robonaldo.context.robot import RobotColor, RobotOwnership
from robonaldo.context.updater import ContextUpdater
from robonaldo.controller import GameController
from robonaldo.log import Logger, LogLevel
from robonaldo.network import NetworkHandler
from robonaldo.strategy import StrategyManager
from robonaldo.utils import Singleton
class Robonaldo(metaclass = Singleton):
    """Singleton application facade.

    Wires the network layer, context updater, game controller and strategy
    manager together, in three one-shot phases:
    construct() -> connect() -> initialize().
    """
    # One-shot phase flags (class-level defaults, set on the singleton).
    __constructed = False
    __connected = False
    __initialized = False
    def construct(self) -> None:
        """Build the core services. Idempotent: only the first call acts."""
        if self.__constructed is True:
            return
        self.__constructed = True
        self.__logger = Logger(name = 'Robonaldo')
        self.__logger.info("Setting up NetworkHandler...")
        self.network_handler = NetworkHandler()
        self.__logger.info("Setting up ContextUpdater...")
        self.context_updater = ContextUpdater(self.network_handler)
        self.__logger.info("Initializing GameController with NetworkHandler...")
        GameController().set_network(self.network_handler)
        # self.__logger.info("Setting up StrategyManager...")
        # StrategyManager().construct()
    def connect(self, host: str, key: str) -> None:
        """Open a blocking connection to the server; marks __connected."""
        self.__logger.info("Initializing connection to the server...")
        self.network_handler.connect(host = host, key = key, wait = True)
        self.__logger.info("Connection established.")
        self.__connected = True
    def initialize(self, team_color: RobotColor, side: int = 1) -> None:
        """Set team color/side and start the strategies.

        Requires a prior successful connect(); idempotent after first success
        (only self.side is updated on repeated calls).
        """
        if self.__connected is not True:
            self.__logger.error("Robonaldo client not connected to a server!")
            return
        self.side = side
        if self.__initialized is True:
            return
        self.__initialized = True
        self.__logger.info("Defining color as " + team_color.name + ".")
        self.team_color = team_color
        # Map each color to ALLY/ENEMY ownership relative to our team.
        self.ally_map = {
            team_color: RobotOwnership.ALLY,
            team_color.get_other(): RobotOwnership.ENEMY
        }
        self.__logger.info("Initializing ContextUpdater...")
        self.context_updater.initialize()
        self.__logger.info("Registering StrategyManager to ContextUpdater...")
        StrategyManager().register_on(self.context_updater)
        self.__logger.info("Let's rock.")
    def stop(self) -> None:
        """Best-effort shutdown: zero all robots, close the network, stop strategies."""
        self.__logger.info("Stopping...")
        for robot in GameController().controllers:
            try:
                robot.control(0, 0, 0)
            except:
                # might not have da key, who cares
                pass
        self.network_handler.close()
        if StrategyManager().attack_thread is not None:
            StrategyManager().attack_thread.stop()
        if StrategyManager().defense_thread is not None:
            StrategyManager().defense_thread.stop()
# Build the singleton's services as soon as the module is imported.
Robonaldo().construct()

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("color", help="our team's color")
    parser.add_argument("host", help="the host controller IPv4 address")
    parser.add_argument("-k", "--key", type=str, help="the team's api key")
    args = parser.parse_args()

    color = RobotColor[args.color]
    host = args.host
    # connect() expects a string key; argparse yields None when -k is omitted.
    key = args.key
    if key is None:
        key = ''

    # BUG FIX: initialize() only accepts team_color/side and refuses to run
    # until connect() has succeeded, so the original
    # `initialize(team_color=..., host=..., key=...)` raised TypeError.
    # Connect first, then initialize.
    robonaldo = Robonaldo()
    robonaldo.connect(host = host, key = key)
    robonaldo.initialize(team_color = color)
622e0a9c66f62bb7831c5fe1808530df92068514 | 1,497 | py | Python | Abstract_Base_Classes(ABC).py | maiconloure/Learning_Python | 2999508909ace5f8ca0708cdea93b82abaaeafb2 | [
"MIT"
] | null | null | null | Abstract_Base_Classes(ABC).py | maiconloure/Learning_Python | 2999508909ace5f8ca0708cdea93b82abaaeafb2 | [
"MIT"
] | null | null | null | Abstract_Base_Classes(ABC).py | maiconloure/Learning_Python | 2999508909ace5f8ca0708cdea93b82abaaeafb2 | [
"MIT"
] | null | null | null | import abc
# ABC é a superclasse para classes abstratas
# Uma classe abstrata nao pode ser instanciada e deve conter pelo menos um metodo abstrato
class Funcionario(abc.ABC):  # abstract: Funcionario() can no longer be instantiated directly
    """Abstract employee base class; subclasses must implement get_bonificacao."""

    def __init__(self, nome, cpf, salario):
        self._nome = nome
        self._cpf = cpf
        self._salario = salario

    @abc.abstractmethod  # get_bonificacao is now mandatory for every subclass
    def get_bonificacao(self):
        """Base bonus of 10% of the salary (available via super())."""
        return self._salario * 0.10
class ControleDeBonificacoes:
    """Accumulates the bonuses of every registered employee."""

    def __init__(self, total=0):
        self.total = total

    def registra(self, funcionario):
        """Add the object's bonus to the total, if it exposes get_bonificacao."""
        if not hasattr(funcionario, 'get_bonificacao'):
            print(f'instância de {funcionario.__class__.__name__} não implementa o método get_bonificação')
            return
        self.total += funcionario.get_bonificacao()

    @property
    def total_bonificacoes(self):
        """Running total of all registered bonuses."""
        return self.total
class Gerente(Funcionario):
    """Manager: base employee bonus plus a fixed 1000 premium."""

    def __init__(self, nome, cpf, salario, senha, qtd_funcionarios):
        super().__init__(nome, cpf, salario)
        self._senha = senha
        self._qtd_funcionarios = qtd_funcionarios

    def get_bonificacao(self):
        base = super().get_bonificacao()
        return base + 1000
if __name__ == '__main__':
    # BUG FIX: Funcionario declares get_bonificacao as an @abc.abstractmethod,
    # so `Funcionario('Pedro', ...)` raises TypeError at runtime. Demonstrate
    # the hierarchy with the concrete Gerente subclass instead.
    funcionario = Gerente('Pedro', '111111111-11', 2000.0, 'senha', 10)
    print(f'bonificação funcionário: {funcionario.get_bonificacao()}')
    print(f"variaveis de funcionario: {vars(funcionario)}\n")
| 31.1875 | 107 | 0.690715 |
7b079ec9deed15fc836045ae95b2a75fdae4a9d2 | 412 | py | Python | tests/h/traversal/profile_test.py | pombredanne/h | 9c4c2dc0d53ed5bed5183936c24b4c27b23070b4 | [
"BSD-2-Clause"
] | null | null | null | tests/h/traversal/profile_test.py | pombredanne/h | 9c4c2dc0d53ed5bed5183936c24b4c27b23070b4 | [
"BSD-2-Clause"
] | null | null | null | tests/h/traversal/profile_test.py | pombredanne/h | 9c4c2dc0d53ed5bed5183936c24b4c27b23070b4 | [
"BSD-2-Clause"
] | null | null | null | from unittest.mock import sentinel
import pytest
from h.traversal.profile import ProfileRoot
class TestProfileRoot:
    def test_acl_for_profile(self, ACL):
        """__acl__ should delegate straight to ACL.for_profile()."""
        result = ProfileRoot(sentinel.request).__acl__()

        ACL.for_profile.assert_called_once_with()
        assert result == ACL.for_profile.return_value

    @pytest.fixture
    def ACL(self, patch):
        # Replace the ACL helper used by h.traversal.profile with a mock.
        return patch("h.traversal.profile.ACL")
| 22.888889 | 53 | 0.720874 |
fdeb86478b8afe44680864f285f575d71c40eb1f | 1,327 | py | Python | pydeel/Convert2fasta.py | alegione/pydeel | 6c02aa18e9d40b22746727e4718ed4f87cd574cd | [
"MIT"
] | null | null | null | pydeel/Convert2fasta.py | alegione/pydeel | 6c02aa18e9d40b22746727e4718ed4f87cd574cd | [
"MIT"
] | null | null | null | pydeel/Convert2fasta.py | alegione/pydeel | 6c02aa18e9d40b22746727e4718ed4f87cd574cd | [
"MIT"
] | null | null | null | '''
Module : Convert2fasta
Description : Tool to convert input files to fasta.
Copyright : (c) Alistair Legione, 11 Jul 2019
License : MIT
Maintainer : legionea@unimelb.edu.au
Portability : POSIX
'''
from Bio import SeqIO
def gbkToFasta(gbk_filename, fasta_filename):
    """Convert every record of a GenBank file to FASTA.

    :param gbk_filename: path to the input GenBank file
    :param fasta_filename: path to the FASTA file to write
    """
    with open(gbk_filename, "r") as input_handle:
        with open(fasta_filename, "w") as output_handle:
            for seq_record in SeqIO.parse(input_handle, "genbank"):
                print("Converting GenBank record %s to FASTA" % seq_record.id)
                output_handle.write(">%s %s\n%s\n" % (seq_record.id,
                                                      seq_record.description,
                                                      seq_record.seq))
    return None
def gbkTofaa(gbk_filename, faa_filename):
    """Write the protein translations of all CDS features of a GenBank file
    to a FASTA amino-acid (.faa) file.

    :param gbk_filename: path to the input GenBank file
    :param faa_filename: path to the protein FASTA file to write
    """
    input_handle = open(gbk_filename, "r")
    output_handle = open(faa_filename, "w")
    # BUG FIX: the original referenced `seq_record` without ever parsing the
    # input, so this always raised NameError. Iterate the GenBank records as
    # gbkToFasta does.
    for seq_record in SeqIO.parse(input_handle, "genbank"):
        for seq_feature in seq_record.features:
            if seq_feature.type=="CDS":
                # Each CDS is expected to carry exactly one translation.
                assert len(seq_feature.qualifiers['translation'])==1
                output_handle.write(">%s from %s\n%s\n" % (
                    seq_feature.qualifiers['locus_tag'][0],
                    seq_record.name,
                    seq_feature.qualifiers['translation'][0]))
    output_handle.close()
    input_handle.close()
    return None
if __name__ == '__main__':
    # NOTE(review): no main() is defined in this module, so running it as a
    # script raises NameError — confirm the intended CLI entry point.
    main()
| 24.574074 | 70 | 0.629992 |
2c4879759b27fac876313d4a1202eaedc5c84f7f | 7,295 | py | Python | tests/gem5/suite.py | mrunalkp/cs251 | 894d1c7fa2e33c1af1844b4b3c420cf37d649027 | [
"BSD-3-Clause"
] | 135 | 2016-10-21T03:31:49.000Z | 2022-03-25T01:22:20.000Z | tests/gem5/suite.py | akeley98/FU-pools | dcd47b7dad279246093081cab24b95cae363c3b3 | [
"BSD-3-Clause"
] | 35 | 2017-03-10T17:57:46.000Z | 2022-02-18T17:34:16.000Z | tests/gem5/suite.py | akeley98/FU-pools | dcd47b7dad279246093081cab24b95cae363c3b3 | [
"BSD-3-Clause"
] | 48 | 2016-12-08T12:03:13.000Z | 2022-02-16T09:16:13.000Z | # Copyright (c) 2017 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Sean Wilson
import os
import copy
import subprocess
import sys
from testlib.test import TestFunction
from testlib.suite import TestSuite
from testlib.helper import log_call
from testlib.config import constants, config
from fixture import TempdirFixture, Gem5Fixture, VariableFixture
import verifier
def gem5_verify_config(name,
                       config,
                       config_args,
                       verifiers,
                       gem5_args=tuple(),
                       fixtures=[],
                       valid_isas=constants.supported_isas,
                       valid_variants=constants.supported_variants,
                       length=constants.supported_lengths[0],
                       protocol=None):
    '''
    Helper class to generate common gem5 tests using verifiers.
    The generated TestSuite will run gem5 with the provided config and
    config_args. After that it will run any provided verifiers to verify
    details about the gem5 run.
    .. seealso:: For the verifiers see :mod:`testlib.gem5.verifier`
    :param name: Name of the test.
    :param config: The config to give gem5.
    :param config_args: A list of arguments to pass to the given config.
    :param verifiers: An iterable with Verifier instances which will be placed
        into a suite that will be ran after a gem5 run.
    :param gem5_args: An iterable with arguments to give to gem5. (Arguments
        that would normally go before the config path.)
    :param fixtures: Extra fixtures shared by every generated suite; copied
        per call, so the mutable default list is never mutated.
    :param valid_isas: An iterable with the isas that this test can be ran
        for. If None given, will run for all supported_isas.
    :param valid_variants: An iterable with the variant levels that
        this test can be ran for. (E.g. opt, debug)
    :param length: Tag describing the expected runtime of the test.
    :param protocol: Optional Ruby protocol; appended to the suite name and
        passed to the Gem5Fixture build target.
    :return: A list with one TestSuite per (variant, isa) combination that is
        not listed in the ".testignore" file.
    '''
    # Defensive copy: the default [] would otherwise be shared across calls.
    fixtures = list(fixtures)
    testsuites = []
    # Obtain the set of tests to ignore. This is found in the
    # ".testignore" file.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))
    _test_ignore_file_loc = os.path.join(__location__,".testignore")
    ignore = set()
    if os.path.exists(_test_ignore_file_loc):
        ignore.update(open(_test_ignore_file_loc).read().splitlines())
    for opt in valid_variants:
        for isa in valid_isas:
            # Create a tempdir fixture to be shared throughout the test.
            tempdir = TempdirFixture()
            gem5_returncode = VariableFixture(
                name=constants.gem5_returncode_fixture_name)
            # Common name of this generated testcase.
            _name = '{given_name}-{isa}-{opt}'.format(
                given_name=name,
                isa=isa,
                opt=opt)
            if protocol:
                _name += '-'+protocol
            # We check to see if this test suite is to be ignored. If so, we
            # skip it.
            if _name in ignore:
                continue
            # Create the running of gem5 subtest.
            # NOTE: We specifically create this test before our verifiers so
            # this is listed first.
            tests = []
            gem5_execution = TestFunction(
                _create_test_run_gem5(config, config_args, gem5_args),
                name=_name)
            tests.append(gem5_execution)
            # Create copies of the verifier subtests for this isa and
            # variant.
            for verifier in verifiers:
                tests.append(verifier.instantiate_test(_name))
            # Add the isa and variant to tags list.
            tags = [isa, opt, length]
            # Create the gem5 target for the specific architecture and
            # variant.
            _fixtures = copy.copy(fixtures)
            _fixtures.append(Gem5Fixture(isa, opt, protocol))
            _fixtures.append(tempdir)
            _fixtures.append(gem5_returncode)
            # Finally construct the self contained TestSuite out of our
            # tests.
            testsuites.append(TestSuite(
                name=_name,
                fixtures=_fixtures,
                tags=tags,
                tests=tests))
    return testsuites
def _create_test_run_gem5(config, config_args, gem5_args):
    '''Build the closure that actually executes gem5 for one generated test.
    :return: A function taking the test params, suitable for TestFunction.
    '''
    def test_run_gem5(params):
        '''
        Simple \'test\' which runs gem5 and saves the result into a tempdir.
        NOTE: Requires fixtures: tempdir, gem5
        '''
        fixtures = params.fixtures
        # Normalize gem5_args to a tuple of argument strings.
        if gem5_args is None:
            _gem5_args = tuple()
        elif isinstance(gem5_args, str):
            # If just a single str, place it in an iterable
            _gem5_args = (gem5_args,)
        else:
            _gem5_args = gem5_args
        # FIXME/TODO: I don't like the idea of having to modify this test run
        # or always collect results even if not using a verifier. There should
        # be some configuration in here that only gathers certain results for
        # certain verifiers.
        #
        # I.E. Only the returncode verifier will use the gem5_returncode
        # fixture, but we always require it even if that verifier isn't being
        # ran.
        returncode = fixtures[constants.gem5_returncode_fixture_name]
        tempdir = fixtures[constants.tempdir_fixture_name].path
        gem5 = fixtures[constants.gem5_binary_fixture_name].path
        command = [
            gem5,
            '-d', # Set redirect dir to tempdir.
            tempdir,
            '-re',# TODO: Change to const. Redirect stdout and stderr
        ]
        command.extend(_gem5_args)
        command.append(config)
        # Config_args should set up the program args.
        command.extend(config_args)
        # Stash the exit code so the returncode verifier can inspect it later.
        returncode.value = log_call(params.log, command, stderr=sys.stderr)
    return test_run_gem5
| 39.863388 | 78 | 0.650446 |
089da7bee36e711da7a2699152ae63c2c95b31b0 | 4,259 | py | Python | keep_backend/privacy/map.py | 9929105/KEEP | a3e8b00f82367e13835e5137bd5c0eaa7c8d26d2 | [
"MIT"
] | null | null | null | keep_backend/privacy/map.py | 9929105/KEEP | a3e8b00f82367e13835e5137bd5c0eaa7c8d26d2 | [
"MIT"
] | null | null | null | keep_backend/privacy/map.py | 9929105/KEEP | a3e8b00f82367e13835e5137bd5c0eaa7c8d26d2 | [
"MIT"
] | null | null | null | # -*-Python-*-
###############################################################################
#
# File: map2d.py
# RCS: $Header: $
# Description: Transform 2d map coordinates providing Differential Privacy
# Author: Staal Vinterbo
# Created: Wed Mar 27 17:07:29 2013
# Modified: Thu Mar 28 13:25:58 2013 (Staal Vinterbo) staal@mats
# Language: Python
# Package: N/A
# Status: Experimental
#
# (c) Copyright 2013, Staal Vinterbo, all rights reserved.
#
###############################################################################
from random import random
from math import log
import numpy as np
def intervalq(point, bounds):
    '''Find which interval a point lies in given interval bounds.

    input:  point  - number to identify bucket for
            bounds - list of increasing bucket bounds including ends
    output: index such that bounds[index - 1] <= point < bounds[index];
            points outside the range are clamped to the first/last interval.
    '''
    right = len(bounds) - 1
    left = 0
    assert(right > 0) # bounds must contain at least two elements

    # Clamp points that fall outside the bounds range.
    if point >= bounds[right]:
        return right
    if point <= bounds[left]:
        return 1

    # Binary search for the enclosing interval.
    while left < right - 1:
        assert(bounds[left] < bounds[right]) # bounds must be sorted
        # BUG FIX: use floor division — under Python 3 the original '/'
        # produces a float and bounds[mid] then raises TypeError.
        mid = (left + right) // 2
        if point >= bounds[mid]:
            left = mid
        else:
            right = mid
    return right
def rlaplace(scale, location=0):
    '''Draw one random deviate from the Laplace(location, scale) distribution
    by inverting the CDF on a single uniform draw.'''
    assert(scale > 0)
    u = random()
    if u >= 0.5:
        # upper half of the unit interval: fold back and subtract
        return location - scale * log(2 * (1 - u))
    return location + scale * log(2 * u)
def noisyh(h, epsilon=1.0, tau=0.5):
    '''Make a histogram in a numpy array differentially private.

    Laplace(scale=2/epsilon) noise is added to every cell. The expected
    maximal noise over n cells is O(log(n)/epsilon), so cells smaller than
    tau * log(n)/epsilon are set to 0 to suppress noise-only counts.
    '''
    # BUG FIX: map() is a lazy iterator on Python 3, so the original
    # len(hp) and np.reshape(hpt, ...) raised TypeError; build lists instead.
    hp = [rlaplace(scale=2/epsilon, location=x) for x in h.flatten()]
    threshold = tau * log(len(hp))/epsilon
    hpt = [0 if y < threshold else y for y in hp]
    return np.reshape(hpt, h.shape)
def p2m(points, xbounds, ybounds):
    '''Convert a list of (x, y) points to a 2d histogram.

    xbounds and ybounds contain the grid axis points into which the
    coordinates are discretized.'''
    xb = sorted(xbounds)  # guarantee sorted boundaries
    yb = sorted(ybounds)  # guarantee sorted boundaries
    h = np.zeros((len(xb) - 1, len(yb) - 1))
    for x, y in points:
        row = intervalq(x, xb) - 1
        col = intervalq(y, yb) - 1
        h[row, col] += 1
    return h
def m2p(h, xbounds, ybounds):
    '''Transform a 2d histogram back into a list of points.

    h[i, j] is translated into int(h[i, j]) points (x, y) with x uniformly
    distributed in [xbounds[i], xbounds[i + 1]) and y uniformly distributed
    in [ybounds[j], ybounds[j + 1]).
    '''
    xb = sorted(xbounds) # make sure boundaries are sorted
    yb = sorted(ybounds) # make sure boundaries are sorted
    nxb = len(xb) - 1 # number of x intervals
    nyb = len(yb) - 1 # number of y intervals
    assert(h.shape == (nxb, nyb))
    points = []
    for i in range(nxb):
        ax = xb[i]
        bx = xb[i + 1]
        xwidth = bx - ax
        for j in range(nyb):
            ay = yb[j]
            by = yb[j + 1]
            ywidth = by - ay
            # BUG FIX: map() returns an iterator on Python 3, so the original
            # `pnts + points` raised TypeError; build the cell's points as a
            # list (prepend order preserved).
            pnts = [(ax + random() * xwidth, ay + random() * ywidth)
                    for _ in range(int(h[i, j]))]
            points = pnts + points
    return points
def privatize(points, xbounds, ybounds, epsilon=1.0, tau=1.5):
    '''Create a differentially private version of a list of points.

    The grid is defined by the axis points in xbounds and ybounds, epsilon is
    the differential privacy level, and tau is the filtering parameter passed
    through to noisyh().
    '''
    histogram = p2m(points, xbounds, ybounds)
    dp_histogram = np.array(noisyh(histogram, epsilon, tau).round(), int)
    return m2p(dp_histogram, xbounds, ybounds)
| 30.205674 | 86 | 0.577835 |
c79a02a7177c1f6098815e572edc3d1513691355 | 644 | py | Python | mundo-1/ex022.py | PedroSantana2/exercicios-python-canal-curso-em-video | 154ae9771e88906c7fcef5efc5799e44acfc2ae3 | [
"MIT"
] | 1 | 2021-03-17T20:16:36.000Z | 2021-03-17T20:16:36.000Z | mundo-1/ex022.py | PedroSantana2/exercicios-python-canal-curso-em-video | 154ae9771e88906c7fcef5efc5799e44acfc2ae3 | [
"MIT"
] | null | null | null | mundo-1/ex022.py | PedroSantana2/exercicios-python-canal-curso-em-video | 154ae9771e88906c7fcef5efc5799e44acfc2ae3 | [
"MIT"
] | null | null | null | '''
Crie um programa que leia o nome completo de uma pessoa e mostre:
- O nome com todas as letras maiúsculas e minúsculas.
- Quantas letras ao todo (sem considerar espaços).
- Quantas letras tem o primeiro nome.
'''
#Recebendo informações:
nome = input('Digite um nome: ')
#Declarando variaveis:
maiusculas = nome.upper()
minusculas = nome.lower()
quantidade_letras = len(nome) - nome.count(' ')
primeiro_nome = len(nome.split()[0])
#Resultado:
print('Seu nome em maiusculas: {}\nSeu nome em minusculas:{}\nSeu nome tem ao todo {} letras\nSeu primeiro nome tem {} letras'.format(maiusculas, minusculas, quantidade_letras, primeiro_nome))
| 33.894737 | 192 | 0.73913 |
4baa1a5b125a9a94fa9ced31389b3a0c5df815fe | 1,615 | py | Python | myapp/views.py | mathewlathara/TSAbaggagedetection | 7a83d756896e14e2d8bb62a9f1628d889778bddc | [
"MIT"
] | null | null | null | myapp/views.py | mathewlathara/TSAbaggagedetection | 7a83d756896e14e2d8bb62a9f1628d889778bddc | [
"MIT"
] | null | null | null | myapp/views.py | mathewlathara/TSAbaggagedetection | 7a83d756896e14e2d8bb62a9f1628d889778bddc | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import viewsets
from .serializers import passengerSerializer
from .models import passenger
from .detectobject import a_test_file, process_image
# Create your views here.
from django.http import HttpResponse
import uuid
def hello(request):
    """Return a static HTML greeting."""
    return HttpResponse("<h2>Hello, Welcome to Django!</h2>")
import datetime
# Create your views here.
from django.http import HttpResponse, HttpResponseNotFound
#importing loading from django template
from django.template import loader
def index(request):
    """Render index.html with a hard-coded context."""
    template = loader.get_template('index.html')
    context = {
        'student': 'rahul'
    }
    return HttpResponse(template.render(context))
from django.views.decorators.http import require_http_methods
@require_http_methods(["GET"])
def show(request):
    """GET-only demo view returning a static heading."""
    return HttpResponse('<h1>This is Http GET request.</h1>')
@require_http_methods(["POST"])
def add_scanobject(request):
    """Accept a POSTed scan object: dump the form payload and show the index page."""
    print(request.POST)
    return render(request, "index.html")
@require_http_methods(["GET"])
def process_image_function(request):
    """Run object detection on the test image named by the `myvalue` query
    parameter and return the generated output's filename.
    """
    import os

    uuidfilename = uuid.uuid4().hex
    query = request.GET.get('myvalue')
    # SECURITY FIX: `myvalue` is attacker-controlled; reduce it to a bare
    # filename so "../" sequences cannot escape the testimages directory.
    # (Also avoids a TypeError when the parameter is missing.)
    safe_name = os.path.basename(query) if query else ''
    passval = "D:/Lambton documents/Sem 2 project/luggageproject/myapp/static/testimages/" + safe_name
    process_image(passval, uuidfilename)
    #functionreturn = a_test_file()
    return HttpResponse(uuidfilename)
class passengerViewSet(viewsets.ModelViewSet):
    """DRF viewset exposing passengers, ordered by name."""
    queryset = passenger.objects.all().order_by('passenger_name')
    serializer_class = passengerSerializer
bc7a741d62c461ab71a626aa903a1a0d8ad86def | 319 | py | Python | config/wsgi.py | uktrade/return-to-office | d4c53c734611413c9f8a7624e52dc35910c5ff57 | [
"MIT"
] | 1 | 2020-10-25T18:16:47.000Z | 2020-10-25T18:16:47.000Z | config/wsgi.py | uktrade/return-to-office | d4c53c734611413c9f8a7624e52dc35910c5ff57 | [
"MIT"
] | 1 | 2020-10-27T07:11:26.000Z | 2020-10-27T07:11:26.000Z | config/wsgi.py | uktrade/return-to-office | d4c53c734611413c9f8a7624e52dc35910c5ff57 | [
"MIT"
] | null | null | null | """
WSGI config for fido project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 21.266667 | 78 | 0.777429 |
3da67df97dbf64d8c06321984efa18f285a6d5c8 | 1,932 | py | Python | model.py | ashwinmr/Ud_CarND_P4 | 85f1180051f887df81b9ea3c6be3c5894ff35322 | [
"MIT"
] | null | null | null | model.py | ashwinmr/Ud_CarND_P4 | 85f1180051f887df81b9ea3c6be3c5894ff35322 | [
"MIT"
] | null | null | null | model.py | ashwinmr/Ud_CarND_P4 | 85f1180051f887df81b9ea3c6be3c5894ff35322 | [
"MIT"
] | null | null | null |
# coding: utf-8
# # Imports
# In[2]:
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, MaxPooling2D, Dropout, Activation
# # Read csv and load images
# In[3]:
correction = 0.2 # Correction for side cameras
images = []
measurements = []
# Open the csv file
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    # Skip the header
    next(reader)
    for row in reader:
        # Append center left and right images and augment with flipped images
        # (row[0:3] are the center/left/right camera paths; each image also
        # gets a horizontally flipped copy, so 6 images per csv row)
        for i in range(3):
            image_path = 'data/IMG/' + row[i].split('/')[-1]
            image_bgr = cv2.imread(image_path)
            image = cv2.cvtColor(image_bgr,cv2.COLOR_BGR2RGB)
            # Get flipped images
            image_flipped = cv2.flip(image,1)
            images.extend([image,image_flipped])
        # Append measurements for center left and right images and augment with flipped images
        # NOTE: this order must mirror the image order above:
        # center, center-flipped, left, left-flipped, right, right-flipped
        measurement = float(row[3])
        measurements.extend([measurement,-measurement,measurement+correction,-measurement-correction,measurement-correction,-measurement+correction])
# Create the training set
X_train = np.array(images)
y_train = np.array(measurements)
# # Create model
# In[11]:
model = Sequential()
# Normalize the input to roughly [-0.5, 0.5]
model.add(Lambda(lambda x: x/255.0 -0.5, input_shape=(160,320,3)))
# Crop the images to only the road (drop 70 px sky, 25 px hood)
model.add(Cropping2D(cropping=((70,25),(0,0))))
model.add(Conv2D(32,(3, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Flatten())
# Get the single steering output
model.add(Dense(1))
# Compile the model
model.compile(loss='mse',optimizer='adam')
# Train the model
model.fit(X_train,y_train,epochs = 3,validation_split=0.2, shuffle = True)
# Save the model
model.save('model.h5')
| 25.76 | 149 | 0.688406 |
7463ff0e4e82d3112d243339029a3e8c6e58b364 | 1,482 | py | Python | code/venv/lib/python3.8/site-packages/datadog_api_client/v2/model/relationship_to_role_data.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | code/venv/lib/python3.8/site-packages/datadog_api_client/v2/model/relationship_to_role_data.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | code/venv/lib/python3.8/site-packages/datadog_api_client/v2/model/relationship_to_role_data.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v2.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
    """Import RolesType on demand and publish it into this module's globals.

    Generated-code pattern: the import is deferred (presumably to avoid import
    cycles between generated model modules — TODO confirm) and made visible at
    module level so later type lookups resolve.
    """
    from datadog_api_client.v2.model.roles_type import RolesType

    globals()["RolesType"] = RolesType
class RelationshipToRoleData(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Per-attribute validation rules; this model has none.
    validations = {}

    # NOTE: the generator emits this without `self`; it is invoked through the
    # custom cached_property descriptor from model_utils, not as a bound method.
    @cached_property
    def openapi_types():
        # Evaluated lazily so RolesType is only imported when first needed.
        lazy_import()
        return {
            "id": (str,),
            "type": (RolesType,),
        }

    # Maps Python attribute names to their JSON keys (identical here).
    attribute_map = {
        "id": "id",
        "type": "type",
    }

    read_only_vars = {}

    def __init__(self, *args, **kwargs):
        """RelationshipToRoleData - a model defined in OpenAPI

        Keyword Args:
            id (str): [optional] ID of the role.
            type (RolesType): [optional]
        """
        super().__init__(kwargs)
        # Positional arguments are not accepted by generated models.
        self._check_pos_args(args)

    @classmethod
    def _from_openapi_data(cls, *args, **kwargs):
        """Helper creating a new instance from a response."""
        self = super(RelationshipToRoleData, cls)._from_openapi_data(kwargs)
        self._check_pos_args(args)
        return self
| 23.903226 | 108 | 0.633603 |
27c74da3c5952b08e66c6cf4bfb8f61bc9c31cb1 | 580 | py | Python | Server.py | vuchampion/471A2 | d1156cd673715bb294d0d58dfea30f5b2eafd8ba | [
"MIT"
] | null | null | null | Server.py | vuchampion/471A2 | d1156cd673715bb294d0d58dfea30f5b2eafd8ba | [
"MIT"
] | null | null | null | Server.py | vuchampion/471A2 | d1156cd673715bb294d0d58dfea30f5b2eafd8ba | [
"MIT"
] | null | null | null | import sys, socket
from ServerWorker import ServerWorker
class Server:
	"""Minimal RTSP server: listens on a TCP port and hands each accepted
	client connection to a ServerWorker."""

	def main(self):
		"""Parse the port argument, then accept RTSP clients forever.

		Exits with status 1 if the port argument is missing or not an integer.
		"""
		try:
			SERVER_PORT = int(sys.argv[1])
		except (IndexError, ValueError):
			# Bug fix: previously a bare `except` printed the usage message and
			# then fell through, crashing below with SERVER_PORT undefined.
			print("[Usage: Server.py Server_port]\n")
			sys.exit(1)
		rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		rtspSocket.bind(('', SERVER_PORT))
		rtspSocket.listen(5)

		# Receive client info (address,port) through RTSP/TCP session
		while True:
			clientInfo = {}
			clientInfo['rtspSocket'] = rtspSocket.accept()
			ServerWorker(clientInfo).run()
(Server()).main()
| 22.307692 | 65 | 0.660345 |
805b8dfa261aadc216ee8db528cf4382ce3b8c55 | 3,963 | py | Python | nmapsummariser.py | cornerpirate/nmap-summariser | 9893e7db79b58bac52fa6c8cf9f856c62bafb78e | [
"Apache-2.0"
] | 29 | 2015-11-26T12:39:49.000Z | 2021-09-11T19:59:18.000Z | nmapsummariser.py | cornerpirate/nmap-summariser | 9893e7db79b58bac52fa6c8cf9f856c62bafb78e | [
"Apache-2.0"
] | null | null | null | nmapsummariser.py | cornerpirate/nmap-summariser | 9893e7db79b58bac52fa6c8cf9f856c62bafb78e | [
"Apache-2.0"
] | 13 | 2016-06-12T01:51:29.000Z | 2020-06-21T13:12:26.000Z | #! /usr/bin/python
#
# calling: ./nmapsummariser.py <nmap_xml_file>
# for example: ./nmapsummariser.py nmap-full-tcp.xml
#
# Copyright 2015 cornerpirate.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xml.dom.minidom import parse
import os
import sys
def hasOpenPort(ports):
    """Return 1 if any of the given nmap <port> elements is "open", else 0.

    Fixes: the original used Python-2-only ``cmp()`` (a NameError on Python 3)
    and implicitly returned None when no port was open; callers only test
    ``== 1`` so returning an explicit 0 is backward compatible.
    """
    for port in ports:
        state_tags = port.getElementsByTagName("state")
        # A <port> element without a <state> child is skipped.
        if state_tags and state_tags[0].getAttribute("state") == "open":
            return 1
    return 0
def main():
    """Summarise each nmap XML file named on the command line.

    For every host with at least one open port, prints the IP, registered
    hostname and a CSV row per open port (port, state, service, product,
    version).

    Fixes: malformed CSV output (the closing quote after the product field
    was missing), Python-2-only ``cmp()`` replaced with ``==``, bare excepts
    narrowed, unused locals removed. Print statements are written as single
    parenthesised expressions so the output is identical on Python 2 and 3.
    """
    files = []
    # Check that they provided a parameter, if not end gracefully
    if len(sys.argv) < 2:
        print("Usage: " + sys.argv[0] + " <nmap_xml_file> "
              "\nUsage: " + sys.argv[0] + " <nmap_xml_file1> <nmap_xml_file22>"
              "\nAccepts multiple files.")
        sys.exit(0)
    elif len(sys.argv) >= 2:
        for i in range(1, len(sys.argv)):
            files.append(sys.argv[i])

    for f in files:
        doc = parse(f)
        hosts = doc.getElementsByTagName("host")
        for host_tag in hosts:
            address_tag = host_tag.getElementsByTagName("address")[0]
            ip_addy = address_tag.getAttribute("addr")

            hostname = "Unknown"
            try:
                hostname_tag = host_tag.getElementsByTagName("hostname")[0]
                hostname = hostname_tag.getAttribute("name")
            except IndexError:
                pass  # not all scans have hostname tags
            try:
                if len(host_tag.getElementsByTagName("ports")) == 0:
                    print("No ports found")
                    sys.exit(-1)
                _ports = host_tag.getElementsByTagName("ports")[0]
                ports = _ports.getElementsByTagName("port")
                if hasOpenPort(ports) == 1:
                    ## Display IP and hostname information
                    print("Scan Results for " + ip_addy + "\n")
                    print("Registered hostnames: " + hostname + "\n")
                    # NOTE(review): header lists an "Extra" column that is
                    # never emitted in the rows below.
                    print("Port,State,Service,Product,Version,Extra")
                    for port in ports:
                        portnum = port.getAttribute("portid")
                        if len(port.getElementsByTagName("state")) > 0:
                            state_tag = port.getElementsByTagName("state")[0]
                            state = state_tag.getAttribute("state")
                            if state == "open":
                                service_name = "Unknown"
                                service_product = "Unknown"
                                service_version = "Unknown"
                                if len(port.getElementsByTagName("service")) != 0:
                                    service_tag = port.getElementsByTagName("service")[0]
                                    service_name = service_tag.getAttribute("name")
                                    service_product = service_tag.getAttribute("product")
                                    service_version = service_tag.getAttribute("version")
                                # Bug fix: the closing quote after the product
                                # field was missing, producing malformed CSV.
                                print("\"" + portnum + "\",\"" + state + "\",\"" + service_name + "\",\"" + service_product + "\",\"" + service_version + "\"")
            except Exception:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                print(exc_type, fname, exc_tb.tb_lineno)
# Run the summariser only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 37.386792 | 156 | 0.571284 |
f27be19b17cff36f9c3d1e71eade08fe4f7ade36 | 4,017 | py | Python | neutron/plugins/openvswitch/common/config.py | ifzing/neutron | 446b13b1c9161048397a3f60306aeead432adbd8 | [
"Apache-2.0"
] | null | null | null | neutron/plugins/openvswitch/common/config.py | ifzing/neutron | 446b13b1c9161048397a3f60306aeead432adbd8 | [
"Apache-2.0"
] | null | null | null | neutron/plugins/openvswitch/common/config.py | ifzing/neutron | 446b13b1c9161048397a3f60306aeead432adbd8 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.plugins.openvswitch.common import constants
# Empty defaults for the list-valued options registered below.
DEFAULT_BRIDGE_MAPPINGS = []
DEFAULT_VLAN_RANGES = []
DEFAULT_TUNNEL_RANGES = []
DEFAULT_TUNNEL_TYPES = []

# Options registered under the [OVS] section: bridge layout, tunnelling and
# tenant-network configuration for the Open vSwitch plugin/agent.
# NOTE: _() is the gettext translation helper injected at runtime by
# neutron's i18n setup; it is not imported in this module.
ovs_opts = [
    cfg.StrOpt('integration_bridge', default='br-int',
               help=_("Integration bridge to use")),
    cfg.BoolOpt('enable_tunneling', default=False,
                help=_("Enable tunneling support")),
    cfg.StrOpt('tunnel_bridge', default='br-tun',
               help=_("Tunnel bridge to use")),
    cfg.StrOpt('int_peer_patch_port', default='patch-tun',
               help=_("Peer patch port in integration bridge for tunnel "
                      "bridge")),
    cfg.StrOpt('tun_peer_patch_port', default='patch-int',
               help=_("Peer patch port in tunnel bridge for integration "
                      "bridge")),
    cfg.StrOpt('local_ip', default='',
               help=_("Local IP address of GRE tunnel endpoints.")),
    cfg.ListOpt('bridge_mappings',
                default=DEFAULT_BRIDGE_MAPPINGS,
                help=_("List of <physical_network>:<bridge>")),
    cfg.StrOpt('tenant_network_type', default='local',
               help=_("Network type for tenant networks "
                      "(local, vlan, gre, vxlan, or none)")),
    cfg.ListOpt('network_vlan_ranges',
                default=DEFAULT_VLAN_RANGES,
                help=_("List of <physical_network>:<vlan_min>:<vlan_max> "
                       "or <physical_network>")),
    cfg.ListOpt('tunnel_id_ranges',
                default=DEFAULT_TUNNEL_RANGES,
                help=_("List of <tun_min>:<tun_max>")),
    cfg.StrOpt('tunnel_type', default='',
               help=_("The type of tunnels to use when utilizing tunnels, "
                      "either 'gre' or 'vxlan'")),
]

# Options registered under the [AGENT] section: polling, tunnelling and
# L2-population behaviour of the OVS agent.
agent_opts = [
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
    cfg.BoolOpt('minimize_polling',
                default=True,
                help=_("Minimize polling by monitoring ovsdb for interface "
                       "changes.")),
    cfg.IntOpt('ovsdb_monitor_respawn_interval',
               default=constants.DEFAULT_OVSDBMON_RESPAWN,
               help=_("The number of seconds to wait before respawning the "
                      "ovsdb monitor after losing communication with it")),
    cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
                help=_("Network types supported by the agent "
                       "(gre and/or vxlan)")),
    cfg.IntOpt('vxlan_udp_port', default=constants.VXLAN_UDP_PORT,
               help=_("The UDP port to use for VXLAN tunnels.")),
    cfg.IntOpt('veth_mtu',
               help=_("MTU size of veth interfaces")),
    cfg.BoolOpt('l2_population', default=False,
                help=_("Use ml2 l2population mechanism driver to learn "
                       "remote mac and IPs and improve tunnel scalability")),
    cfg.BoolOpt('arp_responder', default=False,
                help=_("Enable local ARP responder if it is supported")),
]

# Register the option groups and the shared agent-state / root-helper options.
cfg.CONF.register_opts(ovs_opts, "OVS")
cfg.CONF.register_opts(agent_opts, "AGENT")
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
| 43.663043 | 78 | 0.630321 |
c0d56d9b547e9d84c30a75ec4f27b9e0e9653a35 | 4,906 | py | Python | src/dataloader.py | boostcampaitech2/model-optimization-level3-nlp-14 | b6f561bae7bd991ac7789bbdada7d6561c68092b | [
"MIT"
] | 3 | 2021-12-23T01:31:38.000Z | 2021-12-26T15:30:31.000Z | src/dataloader.py | boostcampaitech2/model-optimization-level3-nlp-14 | b6f561bae7bd991ac7789bbdada7d6561c68092b | [
"MIT"
] | 1 | 2021-12-23T01:31:21.000Z | 2021-12-23T01:31:21.000Z | src/dataloader.py | boostcampaitech2/model-optimization-level3-nlp-14 | b6f561bae7bd991ac7789bbdada7d6561c68092b | [
"MIT"
] | null | null | null | """Tune Model.
- Author: Junghoon Kim, Jongkuk Lim, Jimyeong Kim
- Contact: placidus36@gmail.com, lim.jeikei@gmail.com, wlaud1001@snu.ac.kr
- Reference
https://github.com/j-marple-dev/model_compression
"""
import glob
import os
from typing import Any, Dict, List, Tuple, Union
import torch
import yaml
from torch.utils.data import DataLoader, random_split
from torchvision.datasets import ImageFolder, VisionDataset
from src.utils.data import weights_for_balanced_classes
from src.utils.torch_utils import split_dataset_index, subset_sampler
def create_dataloader(
    config: Dict[str, Any],
) -> Tuple[DataLoader, DataLoader, DataLoader]:
    """Build (train, valid, test) DataLoaders from a config dictionary.

    Args:
        config: experiment configuration; must contain DATA_PATH, DATASET,
            IMG_SIZE, VAL_RATIO, AUG_TRAIN, AUG_TEST, AUG_TRAIN_PARAMS and
            BATCH_SIZE. AUG_TEST_PARAMS and SUBSET_SAMPLING_RATIO are optional.

    Returns:
        (train_loader, valid_loader, test_loader)
    """
    # SUBSET_SAMPLING_RATIO defaults to 0.0 (no subsampling) when absent/None.
    subset_ratio = config.get("SUBSET_SAMPLING_RATIO")
    if subset_ratio is None:
        subset_ratio = 0.0

    train_ds, valid_ds, test_ds = get_dataset(
        data_path=config["DATA_PATH"],
        dataset_name=config["DATASET"],
        img_size=config["IMG_SIZE"],
        val_ratio=config["VAL_RATIO"],
        transform_train=config["AUG_TRAIN"],
        transform_test=config["AUG_TEST"],
        transform_train_params=config["AUG_TRAIN_PARAMS"],
        transform_test_params=config.get("AUG_TEST_PARAMS"),
        subset_sampling_ratio=subset_ratio,
    )

    return get_dataloader(
        train_dataset=train_ds,
        val_dataset=valid_ds,
        test_dataset=test_ds,
        batch_size=config["BATCH_SIZE"],
    )
def get_dataset(
    data_path: str = "./save/data",
    dataset_name: str = "CIFAR10",
    img_size: float = 32,
    val_ratio: float = 0.2,
    transform_train: str = "simple_augment_train",
    transform_test: str = "simple_augment_test",
    transform_train_params: Dict[str, int] = None,
    transform_test_params: Dict[str, int] = None,
    subset_sampling_ratio: float = 0.0,
) -> Tuple[VisionDataset, VisionDataset, VisionDataset]:
    """Build (train, val, test) datasets for training and testing.

    Args:
        data_path: root directory holding the dataset files.
        dataset_name: "TACO" for the pre-split ImageFolder layout on disk,
            otherwise the name of a torchvision.datasets class (e.g. "CIFAR10").
        img_size: target image size passed to the augmentation policies.
        val_ratio: fraction of the train split reserved for validation
            (torchvision datasets only; TACO ships its own val split).
        transform_train: name of a train-policy factory in
            src.augmentation.policies.
        transform_test: name of a test-policy factory in the same module.
        transform_train_params: optional kwargs for the train policy.
        transform_test_params: optional kwargs for the test policy.
        subset_sampling_ratio: if > 0, subsample the train set by this ratio.

    Returns:
        (train_dataset, val_dataset, test_dataset)

    Fix: removed the dead ``label_weights`` local that was assigned and
    never used.
    """
    if not transform_train_params:
        transform_train_params = dict()
    if not transform_test_params:
        transform_test_params = dict()

    # Resolve preprocessing policies by name (lazy import of the policy module).
    transform_train = getattr(
        __import__("src.augmentation.policies", fromlist=[""]),
        transform_train,
    )(dataset=dataset_name, img_size=img_size, **transform_train_params)
    transform_test = getattr(
        __import__("src.augmentation.policies", fromlist=[""]),
        transform_test,
    )(dataset=dataset_name, img_size=img_size, **transform_test_params)

    if dataset_name == "TACO":
        # TACO is stored as pre-split train/val/test ImageFolder directories.
        train_path = os.path.join(data_path, "train")
        val_path = os.path.join(data_path, "val")
        test_path = os.path.join(data_path, "test")

        train_dataset = ImageFolder(root=train_path, transform=transform_train)
        if subset_sampling_ratio > 0:
            train_dataset = subset_sampler(train_dataset, subset_sampling_ratio)
        val_dataset = ImageFolder(root=val_path, transform=transform_test)
        test_dataset = ImageFolder(root=test_path, transform=transform_test)
    else:
        Dataset = getattr(
            __import__("torchvision.datasets", fromlist=[""]), dataset_name
        )
        train_dataset = Dataset(
            root=data_path, train=True, download=True, transform=transform_train
        )
        if subset_sampling_ratio > 0:
            train_dataset = subset_sampler(train_dataset, subset_sampling_ratio)

        # Carve the validation split out of the (possibly subsampled) train set.
        train_length = int(len(train_dataset) * (1.0 - val_ratio))
        train_dataset, val_dataset = random_split(
            train_dataset, [train_length, len(train_dataset) - train_length]
        )
        test_dataset = Dataset(
            root=data_path, train=False, download=False, transform=transform_test
        )
    return train_dataset, val_dataset, test_dataset
def get_dataloader(
    train_dataset: VisionDataset,
    val_dataset: VisionDataset,
    test_dataset: VisionDataset,
    batch_size: int,
) -> Tuple[DataLoader, DataLoader, DataLoader]:
    """Wrap the three datasets in DataLoaders.

    The train loader shuffles, drops the last partial batch and uses 10
    workers; the eval loaders iterate deterministically with 5 workers.
    """
    pin = torch.cuda.is_available()

    train_loader = DataLoader(
        dataset=train_dataset,
        pin_memory=pin,
        shuffle=True,
        batch_size=batch_size,
        num_workers=10,
        drop_last=True,
    )

    # Validation and test share identical (deterministic) loader settings.
    eval_kwargs = dict(
        pin_memory=pin,
        shuffle=False,
        batch_size=batch_size,
        num_workers=5,
    )
    valid_loader = DataLoader(dataset=val_dataset, **eval_kwargs)
    test_loader = DataLoader(dataset=test_dataset, **eval_kwargs)

    return train_loader, valid_loader, test_loader
c45fc9bfdd03f7bb9e2044a98cd2f5b40fcdd611 | 17,268 | py | Python | frappe/desk/reportview.py | 8848digital/frappe | 64b506d41ffde1257201fcbfc56de634cdef622b | [
"MIT"
] | null | null | null | frappe/desk/reportview.py | 8848digital/frappe | 64b506d41ffde1257201fcbfc56de634cdef622b | [
"MIT"
] | null | null | null | frappe/desk/reportview.py | 8848digital/frappe | 64b506d41ffde1257201fcbfc56de634cdef622b | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
"""build query for doclistview and return results"""
import frappe, json
import frappe.permissions
from frappe.model.db_query import DatabaseQuery
from frappe.model import default_fields, optional_fields, child_table_fields
from frappe import _
from io import StringIO
from frappe.core.doctype.access_log.access_log import make_access_log
from frappe.utils import cstr, format_duration
from frappe.model.base_document import get_controller
from frappe.utils import add_user_info
@frappe.whitelist()
@frappe.read_only()
def get():
	"""Return compressed list-view data for the doctype in the request."""
	args = get_form_params()

	if not is_virtual_doctype(args.doctype):
		return compress(execute(**args), args=args)

	# Virtual doctypes serve their records through their controller.
	controller = get_controller(args.doctype)
	return compress(controller(args.doctype).get_list(args))
@frappe.whitelist()
@frappe.read_only()
def get_list():
	"""Return uncompressed list data for the doctype in the request."""
	args = get_form_params()

	if is_virtual_doctype(args.doctype):
		# Virtual doctypes serve their records through their controller.
		return get_controller(args.doctype)(args.doctype).get_list(args)

	# uncompressed (refactored from frappe.model.db_query.get_list)
	return execute(**args)
@frappe.whitelist()
@frappe.read_only()
def get_count():
	"""Return the total record count for the doctype in the request."""
	args = get_form_params()

	if is_virtual_doctype(args.doctype):
		return get_controller(args.doctype)(args.doctype).get_count(args)

	prefix = 'distinct ' if args.distinct == 'true' else ''
	args.fields = [f"count({prefix}`tab{args.doctype}`.name) as total_count"]
	return execute(**args)[0].get('total_count')
def execute(doctype, *args, **kwargs):
	"""Run a DatabaseQuery for *doctype*, forwarding all query arguments."""
	return DatabaseQuery(doctype).execute(*args, **kwargs)
def get_form_params():
	"""Stringify GET request parameters.

	Copies the request form dict, strips transport-only keys and validates
	the remaining query arguments before returning them.
	"""
	params = frappe._dict(frappe.local.form_dict)
	clean_params(params)
	validate_args(params)
	return params
def validate_args(data):
	"""Parse, normalise and validate list-view query arguments in place."""
	parse_json(data)
	setup_group_by(data)
	validate_fields(data)

	for filter_key in ("filters", "or_filters"):
		if data.get(filter_key):
			validate_filters(data, data[filter_key])

	# Always reset `strict`; any request-supplied value is discarded.
	data.strict = None
	return data
def validate_fields(data):
	"""Validate every requested column, dropping hidden/unpermitted ones.

	Fix: iterate over a copy of ``data.fields`` — the loop removes entries
	from the same list, and removing from a list while iterating it silently
	skips the following element.
	"""
	wildcard = update_wildcard_field_param(data)

	for field in list(data.fields or []):
		fieldname = extract_fieldname(field)
		if is_standard(fieldname):
			continue

		meta, df = get_meta_and_docfield(fieldname, data)

		if not df:
			if wildcard:
				continue
			else:
				raise_invalid_field(fieldname)

		# remove the field from the query if the report hide flag is set and current view is Report
		if df.report_hide and data.view == 'Report':
			data.fields.remove(field)
			continue

		# Drop high-permlevel fields the user has no permlevel access to.
		if df.fieldname in [_df.fieldname for _df in meta.get_high_permlevel_fields()]:
			if df.get('permlevel') not in meta.get_permlevel_access(parenttype=data.doctype):
				data.fields.remove(field)
def validate_filters(data, filters):
	"""Ensure every fieldname referenced in *filters* exists and is queryable.

	Accepts both the list form ([fieldname, op, value] or
	[doctype, fieldname, op, value] per condition) and the dict form
	({fieldname: value}); raises via raise_invalid_field otherwise.
	"""
	if isinstance(filters, list):
		# filters as list
		for condition in filters:
			if len(condition)==3:
				# [fieldname, condition, value]
				fieldname = condition[0]
				if is_standard(fieldname):
					continue
				meta, df = get_meta_and_docfield(fieldname, data)
				if not df:
					raise_invalid_field(condition[0])
			else:
				# [doctype, fieldname, condition, value]
				fieldname = condition[1]
				if is_standard(fieldname):
					continue
				meta = frappe.get_meta(condition[0])
				if not meta.get_field(fieldname):
					raise_invalid_field(fieldname)
	else:
		# filters as a {fieldname: value} dict
		for fieldname in filters:
			if is_standard(fieldname):
				continue
			meta, df = get_meta_and_docfield(fieldname, data)
			if not df:
				raise_invalid_field(fieldname)
def setup_group_by(data):
	'''Add columns for aggregated values e.g. count(name)

	When group_by plus an aggregate function are requested, appends the
	aggregate expression (aliased _aggregate_column) and the aggregated
	field itself to data.fields, then removes the aggregate keys from data.
	'''
	if data.group_by and data.aggregate_function:
		if data.aggregate_function.lower() not in ('count', 'sum', 'avg'):
			frappe.throw(_('Invalid aggregate function'))

		# Only aggregate over real database columns.
		if frappe.db.has_column(data.aggregate_on_doctype, data.aggregate_on_field):
			data.fields.append('{aggregate_function}(`tab{aggregate_on_doctype}`.`{aggregate_on_field}`) AS _aggregate_column'.format(**data))
			if data.aggregate_on_field:
				data.fields.append(f"`tab{data.aggregate_on_doctype}`.`{data.aggregate_on_field}`")
		else:
			raise_invalid_field(data.aggregate_on_field)

		# These keys are consumed here and must not reach DatabaseQuery.
		data.pop('aggregate_on_doctype')
		data.pop('aggregate_on_field')
		data.pop('aggregate_function')
def raise_invalid_field(fieldname):
	"""Abort the request with frappe.DataError for a non-queryable field."""
	message = _('Field not permitted in query') + ': {0}'.format(fieldname)
	frappe.throw(message, frappe.DataError)
def is_standard(fieldname):
	"""Return True when *fieldname* is a framework-provided column.

	Dotted references are first resolved to their bare fieldname.
	"""
	if '.' in fieldname:
		_parenttype, fieldname = get_parenttype_and_fieldname(fieldname, None)
	return (fieldname in default_fields
		or fieldname in optional_fields
		or fieldname in child_table_fields)
def extract_fieldname(field):
	"""Return the bare column reference from a requested field expression.

	Strips an " as "/" AS " alias and unwraps count()/sum()/avg() wrappers;
	rejects fields containing injection-prone tokens.
	"""
	for forbidden_token in (',', '/*', '#'):
		if forbidden_token in field:
			raise_invalid_field(field)

	fieldname = field
	for alias_separator in (' as ', ' AS '):
		fieldname = fieldname.split(alias_separator)[0]

	# certain functions allowed, extract the fieldname from the function
	if fieldname.startswith(('count(', 'sum(', 'avg(')):
		if not fieldname.strip().endswith(')'):
			raise_invalid_field(field)
		fieldname = fieldname.split('(', 1)[1][:-1]

	return fieldname
def get_meta_and_docfield(fieldname, data):
	"""Resolve *fieldname* to its parent doctype's (meta, docfield) pair.

	The docfield is None when the field does not exist on the doctype.
	"""
	parenttype, fieldname = get_parenttype_and_fieldname(fieldname, data)
	meta = frappe.get_meta(parenttype)
	return meta, meta.get_field(fieldname)
def update_wildcard_field_param(data):
	"""Expand a wildcard ("*") fields request into the doctype's real columns.

	Returns True when an expansion happened, False otherwise.
	"""
	requested = data.fields
	if isinstance(requested, str):
		is_wildcard = requested == "*"
	else:
		is_wildcard = (isinstance(requested, (list, tuple))
			and len(requested) == 1 and requested[0] == "*")

	if not is_wildcard:
		return False

	data.fields = frappe.db.get_table_columns(data.doctype)
	return True
def clean_params(data):
	"""Strip transport/internal-only keys from the request dict, in place."""
	for internal_key in (
		"cmd",
		"data",
		"ignore_permissions",
		"view",
		"user",
		"csrf_token",
		"join",
	):
		data.pop(internal_key, None)
def parse_json(data):
	"""Decode JSON-encoded request parameters in place.

	String values for filters/or_filters/fields/docstatus are json-decoded.
	save_user_settings is decoded if a string, otherwise forced to True.
	"""
	for key in ("filters", "or_filters", "fields", "docstatus"):
		raw = data.get(key)
		if isinstance(raw, str):
			data[key] = json.loads(raw)

	raw_settings = data.get("save_user_settings")
	if isinstance(raw_settings, str):
		data["save_user_settings"] = json.loads(raw_settings)
	else:
		data["save_user_settings"] = True
def get_parenttype_and_fieldname(field, data):
	"""Split a possibly qualified field reference into (parenttype, fieldname).

	"`tabChild Doctype`.`fieldname`" yields ("Child Doctype", "fieldname");
	an unqualified field belongs to data.doctype.
	"""
	if "." not in field:
		return data.doctype, field.strip("`")

	parts = field.split(".")
	# parts[0] looks like "`tabChild Doctype`": drop the "`tab" prefix and
	# the trailing backtick.
	return parts[0][4:-1], parts[1].strip("`")
def compress(data, args=None):
	"""Separate row dicts into one shared `keys` list and per-row `values`.

	Also collects user info for assignment avatars and, when
	args.add_total_row is set, appends a totals row via the query-report
	helper. Returns {"keys": ..., "values": ..., "user_info": ...}.
	"""
	from frappe.desk.query_report import add_total_row

	user_info = {}

	if not data: return data
	if args is None:
		args = {}
	values = []
	# Column order is taken from the first row.
	keys = list(data[0])
	for row in data:
		new_row = []
		for key in keys:
			new_row.append(row.get(key))
		values.append(new_row)

		# add user info for assignments (avatar)
		if row._assign:
			for user in json.loads(row._assign):
				add_user_info(user, user_info)

	if args.get("add_total_row"):
		meta = frappe.get_meta(args.doctype)
		values = add_total_row(values, keys, meta)

	return {
		"keys": keys,
		"values": values,
		"user_info": user_info
	}
@frappe.whitelist()
def save_report():
	"""Save a Report Builder report from the request form dict.

	Creates the Report if it does not exist, otherwise updates its JSON;
	returns the report's document name.
	"""
	data = frappe.local.form_dict
	if frappe.db.exists('Report', data['name']):
		d = frappe.get_doc('Report', data['name'])
	else:
		d = frappe.new_doc('Report')
		d.report_name = data['name']
		d.ref_doctype = data['doctype']

	d.report_type = "Report Builder"
	d.json = data['json']
	frappe.get_doc(d).save()
	frappe.msgprint(_("{0} is saved").format(d.name), alert=True)
	return d.name
@frappe.whitelist()
def export_query():
	"""Export list-view data from the report builder as CSV or Excel.

	Reads query parameters from the request, enforces the export permission,
	logs the access, runs the query (optionally restricted to selected rows
	and with a totals row), and writes the serialised file into
	frappe.response.
	"""
	title = frappe.form_dict.title
	frappe.form_dict.pop('title', None)

	form_params = get_form_params()
	form_params["limit_page_length"] = None
	form_params["as_list"] = True
	doctype = form_params.doctype
	add_totals_row = None
	file_format_type = form_params["file_format_type"]
	title = title or doctype

	# These keys are consumed here and must not reach DatabaseQuery.execute.
	del form_params["doctype"]
	del form_params["file_format_type"]

	if 'add_totals_row' in form_params and form_params['add_totals_row']=='1':
		add_totals_row = 1
		del form_params["add_totals_row"]

	frappe.permissions.can_export(doctype, raise_exception=True)

	# Restrict to explicitly selected rows, when provided.
	if 'selected_items' in form_params:
		si = json.loads(frappe.form_dict.get('selected_items'))
		form_params["filters"] = {"name": ("in", si)}
		del form_params["selected_items"]

	make_access_log(doctype=doctype,
		file_type=file_format_type,
		report_name=form_params.report_name,
		filters=form_params.filters)

	db_query = DatabaseQuery(doctype)
	ret = db_query.execute(**form_params)

	if add_totals_row:
		ret = append_totals_row(ret)

	# First row holds the column labels; data rows are prefixed with a serial no.
	data = [[_('Sr')] + get_labels(db_query.fields, doctype)]
	for i, row in enumerate(ret):
		data.append([i+1] + list(row))

	data = handle_duration_fieldtype_values(doctype, data, db_query.fields)

	if file_format_type == "CSV":
		# convert to csv
		import csv
		from frappe.utils.xlsxutils import handle_html
		f = StringIO()
		writer = csv.writer(f)
		for r in data:
			# encode only unicode type strings and not int, floats etc.
			writer.writerow([handle_html(frappe.as_unicode(v)) \
				if isinstance(v, str) else v for v in r])
		f.seek(0)
		frappe.response['result'] = cstr(f.read())
		frappe.response['type'] = 'csv'
		frappe.response['doctype'] = title
	elif file_format_type == "Excel":
		from frappe.utils.xlsxutils import make_xlsx
		xlsx_file = make_xlsx(data, doctype)
		frappe.response['filename'] = title + '.xlsx'
		frappe.response['filecontent'] = xlsx_file.getvalue()
		frappe.response['type'] = 'binary'
def append_totals_row(data):
	"""Return a copy of *data* with a totals row appended.

	Numeric cells are summed column-wise; if the first column collected no
	numeric values it is labelled 'Total'.
	"""
	if not data:
		return data

	rows = list(data)
	totals = [""] * len(rows[0])

	for row in rows:
		for col, cell in enumerate(row):
			if isinstance(cell, (float, int)):
				totals[col] = (totals[col] or 0) + cell

	if not isinstance(totals[0], (int, float)):
		totals[0] = 'Total'

	rows.append(totals)
	return rows
def get_labels(fields, doctype):
	"""Return human-readable column labels for the queried *fields*.

	Aggregate columns (count/sum/avg) are skipped; child-table columns get
	the child doctype appended to the label.

	Fix: the non-dotted branch previously read ``fieldname.strip(...)``,
	which used the stale value from the previous loop iteration (and raised
	NameError on the first one); it must strip ``key``.
	"""
	labels = []
	for key in fields:
		key = key.split(" as ")[0]

		if key.startswith(('count(', 'sum(', 'avg(')):
			continue

		if "." in key:
			parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
		else:
			parenttype = doctype
			fieldname = key.strip("`")

		if parenttype == doctype and fieldname == "name":
			label = _("ID", context="Label of name column in report")
		else:
			df = frappe.get_meta(parenttype).get_field(fieldname)
			label = _(df.label if df else fieldname.title())
			if parenttype != doctype:
				# If the column is from a child table, append the child doctype.
				# For example, "Item Code (Sales Invoice Item)".
				label += f" ({ _(parenttype) })"

		labels.append(label)

	return labels
def handle_duration_fieldtype_values(doctype, data, fields):
	"""Format raw second counts in Duration columns as readable strings.

	*data* is the export table: a header row at index 0 and a serial-number
	column at index 0, hence the +1 / range(1, ...) offsets.

	Fix: the non-dotted branch previously stripped ``field``, which still
	carries any " as alias" suffix, so aliased Duration columns were never
	matched; it must strip ``key`` (the alias-free expression).
	"""
	for field in fields:
		key = field.split(" as ")[0]

		if key.startswith(('count(', 'sum(', 'avg(')): continue

		if "." in key:
			parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
		else:
			parenttype = doctype
			fieldname = key.strip("`")

		df = frappe.get_meta(parenttype).get_field(fieldname)

		if df and df.fieldtype == 'Duration':
			index = fields.index(field) + 1
			for i in range(1, len(data)):
				val_in_seconds = data[i][index]
				if val_in_seconds:
					duration_val = format_duration(val_in_seconds, df.hide_days)
					data[i][index] = duration_val
	return data
@frappe.whitelist()
def delete_items():
	"""Delete the selected documents, in the background when there are many."""
	import json

	doctype = frappe.form_dict.get('doctype')
	items = sorted(json.loads(frappe.form_dict.get('items')), reverse=True)

	if len(items) > 10:
		# Large batches run as a background job to avoid request timeouts.
		frappe.enqueue('frappe.desk.reportview.delete_bulk',
			doctype=doctype, items=items)
	else:
		delete_bulk(doctype, items)
def delete_bulk(doctype, items):
	"""Delete *items* of *doctype* one by one, committing after each success.

	Publishes progress to the requesting user for batches of 5 or more;
	failures roll back only the failed record's changes and the loop
	continues with the next item.
	"""
	for i, d in enumerate(items):
		try:
			frappe.delete_doc(doctype, d)
			if len(items) >= 5:
				frappe.publish_realtime("progress",
					dict(progress=[i+1, len(items)], title=_('Deleting {0}').format(doctype), description=d),
					user=frappe.session.user)
			# Commit after successful deletion
			frappe.db.commit()
		except Exception:
			# rollback if any record failed to delete
			# if not rollbacked, queries get committed on after_request method in app.py
			frappe.db.rollback()
@frappe.whitelist()
@frappe.read_only()
def get_sidebar_stats(stats, doctype, filters=None):
	"""Return sidebar tag statistics for *doctype* as {"stats": ...}."""
	if filters is None:
		filters = []

	if is_virtual_doctype(doctype):
		# Virtual doctypes compute their own stats via the controller.
		controller = get_controller(doctype)
		data = controller(doctype).get_stats({"stats": stats, "filters": filters})
	else:
		data = get_stats(stats, doctype, filters)

	return {"stats": data}
@frappe.whitelist()
@frappe.read_only()
def get_stats(stats, doctype, filters=None):
	"""Return per-tag value counts for the sidebar.

	*stats* is a JSON list of column names; for each column present on the
	doctype's table, maps it to [value, count] pairs. The _user_tags column
	gets special handling (comma-separated tags are split and a "No Tags"
	bucket is added). Database errors for individual tags are swallowed so
	one bad column does not break the whole sidebar.
	"""
	import json
	if filters is None:
		filters = []
	tags = json.loads(stats)
	if filters:
		filters = json.loads(filters)
	stats = {}

	try:
		columns = frappe.db.get_table_columns(doctype)
	except (frappe.db.InternalError, frappe.db.ProgrammingError):
		# raised when _user_tags column is added on the fly
		# raised if its a virtual doctype
		columns = []

	for tag in tags:
		if not tag in columns: continue
		try:
			tag_count = frappe.get_list(doctype,
				fields=[tag, "count(*)"],
				filters=filters + [[tag, '!=', '']],
				group_by=tag,
				as_list=True,
				distinct=1,
			)

			if tag == '_user_tags':
				stats[tag] = scrub_user_tags(tag_count)
				# Count records with no tags at all ('' or a lone comma).
				no_tag_count = frappe.get_list(doctype,
					fields=[tag, "count(*)"],
					filters=filters + [[tag, "in", ('', ',')]],
					as_list=True,
					group_by=tag,
					order_by=tag,
				)

				no_tag_count = no_tag_count[0][1] if no_tag_count else 0

				stats[tag].append([_("No Tags"), no_tag_count])
			else:
				stats[tag] = tag_count

		except frappe.db.SQLError:
			pass
		except frappe.db.InternalError as e:
			# raised when _user_tags column is added on the fly
			pass

	return stats
@frappe.whitelist()
def get_filter_dashboard_data(stats, doctype, filters=None):
	"""Return per-tag value counts for the filter dashboard.

	*stats* is a JSON list of {"name": ..., "type": ...} tag descriptors;
	non-date tags are counted grouped by value, and discrete (non-numeric,
	non-check) tags additionally get a "No Data" bucket.

	Fix: ``json.loads(filters or [])`` raised TypeError whenever *filters*
	was empty, because ``json.loads`` does not accept a list.
	"""
	import json
	tags = json.loads(stats)
	filters = json.loads(filters) if filters else []
	stats = {}

	columns = frappe.db.get_table_columns(doctype)
	for tag in tags:
		if not tag["name"] in columns: continue
		tagcount = []
		if tag["type"] not in ['Date', 'Datetime']:
			tagcount = frappe.get_list(doctype,
				fields=[tag["name"], "count(*)"],
				filters = filters + ["ifnull(`%s`,'')!=''" % tag["name"]],
				group_by = tag["name"],
				as_list = True)

		if tag["type"] not in ['Check','Select','Date','Datetime','Int',
			'Float','Currency','Percent'] and tag['name'] not in ['docstatus']:
			stats[tag["name"]] = list(tagcount)
			if stats[tag["name"]]:
				# Add a bucket for records where the column is empty/null.
				data =["No Data", frappe.get_list(doctype,
					fields=[tag["name"], "count(*)"],
					filters=filters + ["({0} = '' or {0} is null)".format(tag["name"])],
					as_list=True)[0][1]]
				if data and data[1]!=0:
					stats[tag["name"]].append(data)
		else:
			stats[tag["name"]] = tagcount

	return stats
def scrub_user_tags(tagcount):
	"""Split comma-separated tag strings and merge their counts.

	*tagcount* is a sequence of (tag_string, count) pairs where a tag
	string may contain several comma-separated tags; returns a list of
	[tag, total_count] pairs.
	"""
	merged = {}
	for raw_tags, count in dict(tagcount).items():
		if not raw_tags:
			continue
		for tag in raw_tags.split(','):
			if tag:
				merged[tag] = merged.get(tag, 0) + count

	return [[tag, total] for tag, total in merged.items()]
# used in building query in queries.py
def get_match_cond(doctype, as_condition=True):
	"""Return user-permission match conditions for *doctype*.

	With as_condition=True the result is a SQL fragment prefixed with
	" and " (empty string if there are no conditions), with "%" escaped to
	"%%" so it survives later printf-style query formatting.
	"""
	cond = DatabaseQuery(doctype).build_match_conditions(as_condition=as_condition)
	if not as_condition:
		return cond

	return ((' and ' + cond) if cond else "").replace("%", "%%")
def build_match_conditions(doctype, user=None, as_condition=True):
	"""Build permission match conditions for *doctype* as seen by *user*.

	When as_condition is True, "%" is escaped to "%%" so the fragment
	survives printf-style query formatting.
	"""
	conditions = DatabaseQuery(doctype, user=user).build_match_conditions(as_condition=as_condition)
	if not as_condition:
		return conditions
	return conditions.replace("%", "%%")
def get_filters_cond(doctype, filters, conditions, ignore_permissions=None, with_match_conditions=False):
	"""Translate *filters* (JSON string, dict or list form) into a SQL
	condition string prefixed with " and " (empty string if no filters).

	Dict values support the "!value" negation shorthand and
	(operator, operand) tuples for the listed comparison operators.
	"""
	if isinstance(filters, str):
		filters = json.loads(filters)

	if filters:
		flt = filters
		if isinstance(filters, dict):
			# Normalise the dict form into [doctype, fieldname, op, value] rows.
			filters = filters.items()
			flt = []
			for f in filters:
				if isinstance(f[1], str) and f[1][0] == '!':
					flt.append([doctype, f[0], '!=', f[1][1:]])
				elif isinstance(f[1], (list, tuple)) and \
					f[1][0] in (">", "<", ">=", "<=", "!=", "like", "not like", "in", "not in", "between"):
					flt.append([doctype, f[0], f[1][0], f[1][1]])
				else:
					flt.append([doctype, f[0], '=', f[1]])

		query = DatabaseQuery(doctype)
		query.filters = flt
		query.conditions = conditions

		if with_match_conditions:
			query.build_match_conditions()

		query.build_filter_conditions(flt, conditions, ignore_permissions)

		cond = ' and ' + ' and '.join(query.conditions)
	else:
		cond = ''
	return cond
def is_virtual_doctype(doctype):
	"""Return truthy when *doctype* is marked virtual (records are served by
	its controller instead of a database table)."""
	return frappe.db.get_value("DocType", doctype, "is_virtual")
| 27.279621 | 133 | 0.697186 |
9fb090e931e88c651d2deef701c5ad299b67ecd9 | 12,152 | py | Python | E16_DL/src/cs231n/solver.py | Jed-Z/artificial-intelligence-lab | ca5335b13e164230aab4e4a950e930b4d0d94d21 | [
"MIT"
] | 6 | 2020-11-05T04:49:10.000Z | 2022-01-06T06:17:13.000Z | E16_DL/src/cs231n/solver.py | csJed/artificial-intelligence-lab | ca5335b13e164230aab4e4a950e930b4d0d94d21 | [
"MIT"
] | null | null | null | E16_DL/src/cs231n/solver.py | csJed/artificial-intelligence-lab | ca5335b13e164230aab4e4a950e930b4d0d94d21 | [
"MIT"
] | 2 | 2020-11-30T09:14:46.000Z | 2021-10-23T01:03:37.000Z | from __future__ import print_function, division
# from future import standard_library
# standard_library.install_aliases()
from builtins import range
from builtins import object
import os
import pickle as pickle
import numpy as np
from cs231n import optim
class Solver(object):
    """
    A Solver encapsulates all the logic necessary for training classification
    models. The Solver performs stochastic gradient descent using different
    update rules defined in optim.py.
    The solver accepts both training and validation data and labels so it can
    periodically check classification accuracy on both training and validation
    data to watch out for overfitting.
    To train a model, you will first construct a Solver instance, passing the
    model, dataset, and various options (learning rate, batch size, etc) to the
    constructor. You will then call the train() method to run the optimization
    procedure and train the model.
    After the train() method returns, model.params will contain the parameters
    that performed best on the validation set over the course of training.
    In addition, the instance variable solver.loss_history will contain a list
    of all losses encountered during training and the instance variables
    solver.train_acc_history and solver.val_acc_history will be lists of the
    accuracies of the model on the training and validation set at each epoch.
    Example usage might look something like this:
    data = {
      'X_train': # training data
      'y_train': # training labels
      'X_val': # validation data
      'y_val': # validation labels
    }
    model = MyAwesomeModel(hidden_size=100, reg=10)
    solver = Solver(model, data,
                    update_rule='sgd',
                    optim_config={
                      'learning_rate': 1e-3,
                    },
                    lr_decay=0.95,
                    num_epochs=10, batch_size=100,
                    print_every=100)
    solver.train()
    A Solver works on a model object that must conform to the following API:
    - model.params must be a dictionary mapping string parameter names to numpy
      arrays containing parameter values.
    - model.loss(X, y) must be a function that computes training-time loss and
      gradients, and test-time classification scores, with the following inputs
      and outputs:
      Inputs:
      - X: Array giving a minibatch of input data of shape (N, d_1, ..., d_k)
      - y: Array of labels, of shape (N,) giving labels for X where y[i] is the
        label for X[i].
      Returns:
      If y is None, run a test-time forward pass and return:
      - scores: Array of shape (N, C) giving classification scores for X where
        scores[i, c] gives the score of class c for X[i].
      If y is not None, run a training time forward and backward pass and
      return a tuple of:
      - loss: Scalar giving the loss
      - grads: Dictionary with the same keys as self.params mapping parameter
        names to gradients of the loss with respect to those parameters.
    """
    def __init__(self, model, data, **kwargs):
        """
        Construct a new Solver instance.
        Required arguments:
        - model: A model object conforming to the API described above
        - data: A dictionary of training and validation data containing:
          'X_train': Array, shape (N_train, d_1, ..., d_k) of training images
          'X_val': Array, shape (N_val, d_1, ..., d_k) of validation images
          'y_train': Array, shape (N_train,) of labels for training images
          'y_val': Array, shape (N_val,) of labels for validation images
        Optional arguments:
        - update_rule: A string giving the name of an update rule in optim.py.
          Default is 'sgd'.
        - optim_config: A dictionary containing hyperparameters that will be
          passed to the chosen update rule. Each update rule requires different
          hyperparameters (see optim.py) but all update rules require a
          'learning_rate' parameter so that should always be present.
        - lr_decay: A scalar for learning rate decay; after each epoch the
          learning rate is multiplied by this value.
        - batch_size: Size of minibatches used to compute loss and gradient
          during training.
        - num_epochs: The number of epochs to run for during training.
        - print_every: Integer; training losses will be printed every
          print_every iterations.
        - verbose: Boolean; if set to false then no output will be printed
          during training.
        - num_train_samples: Number of training samples used to check training
          accuracy; default is 1000; set to None to use entire training set.
        - num_val_samples: Number of validation samples to use to check val
          accuracy; default is None, which uses the entire validation set.
        - checkpoint_name: If not None, then save model checkpoints here every
          epoch.
        """
        self.model = model
        self.X_train = data['X_train']
        self.y_train = data['y_train']
        self.X_val = data['X_val']
        self.y_val = data['y_val']
        # Unpack keyword arguments
        self.update_rule = kwargs.pop('update_rule', 'sgd')
        self.optim_config = kwargs.pop('optim_config', {})
        self.lr_decay = kwargs.pop('lr_decay', 1.0)
        self.batch_size = kwargs.pop('batch_size', 100)
        self.num_epochs = kwargs.pop('num_epochs', 10)
        self.num_train_samples = kwargs.pop('num_train_samples', 1000)
        self.num_val_samples = kwargs.pop('num_val_samples', None)
        self.checkpoint_name = kwargs.pop('checkpoint_name', None)
        self.print_every = kwargs.pop('print_every', 10)
        self.verbose = kwargs.pop('verbose', True)
        # Throw an error if there are extra keyword arguments
        if len(kwargs) > 0:
            extra = ', '.join('"%s"' % k for k in list(kwargs.keys()))
            raise ValueError('Unrecognized arguments %s' % extra)
        # Make sure the update rule exists, then replace the string
        # name with the actual function
        if not hasattr(optim, self.update_rule):
            raise ValueError('Invalid update_rule "%s"' % self.update_rule)
        self.update_rule = getattr(optim, self.update_rule)
        self._reset()
    def _reset(self):
        """
        Set up some book-keeping variables for optimization. Don't call this
        manually.
        """
        # Set up some variables for book-keeping
        self.epoch = 0
        self.best_val_acc = 0
        self.best_params = {}
        self.loss_history = []
        self.train_acc_history = []
        self.val_acc_history = []
        # Make a deep copy of the optim_config for each parameter
        # (each parameter keeps independent optimizer state, e.g. momentum).
        self.optim_configs = {}
        for p in self.model.params:
            d = {k: v for k, v in self.optim_config.items()}
            self.optim_configs[p] = d
    def _step(self):
        """
        Make a single gradient update. This is called by train() and should not
        be called manually.
        """
        # Make a minibatch of training data.
        # NOTE(review): np.random.choice samples with replacement here, so a
        # minibatch may contain repeated examples.
        num_train = self.X_train.shape[0]
        batch_mask = np.random.choice(num_train, self.batch_size)
        X_batch = self.X_train[batch_mask]
        y_batch = self.y_train[batch_mask]
        # Compute loss and gradient
        loss, grads = self.model.loss(X_batch, y_batch)
        self.loss_history.append(loss)
        # Perform a parameter update
        for p, w in self.model.params.items():
            dw = grads[p]
            config = self.optim_configs[p]
            next_w, next_config = self.update_rule(w, dw, config)
            self.model.params[p] = next_w
            self.optim_configs[p] = next_config
    def _save_checkpoint(self):
        """Pickle the training state to '<checkpoint_name>_epoch_<n>.pkl'.

        No-op when checkpoint_name is unset. The model object (and therefore
        its parameters) is pickled whole inside the checkpoint dict.
        """
        if self.checkpoint_name is None: return
        checkpoint = {
            'model': self.model,
            'update_rule': self.update_rule,
            'lr_decay': self.lr_decay,
            'optim_config': self.optim_config,
            'batch_size': self.batch_size,
            'num_train_samples': self.num_train_samples,
            'num_val_samples': self.num_val_samples,
            'epoch': self.epoch,
            'loss_history': self.loss_history,
            'train_acc_history': self.train_acc_history,
            'val_acc_history': self.val_acc_history,
        }
        filename = '%s_epoch_%d.pkl' % (self.checkpoint_name, self.epoch)
        if self.verbose:
            print('Saving checkpoint to "%s"' % filename)
        with open(filename, 'wb') as f:
            pickle.dump(checkpoint, f)
    def check_accuracy(self, X, y, num_samples=None, batch_size=100):
        """
        Check accuracy of the model on the provided data.
        Inputs:
        - X: Array of data, of shape (N, d_1, ..., d_k)
        - y: Array of labels, of shape (N,)
        - num_samples: If not None, subsample the data and only test the model
          on num_samples datapoints.
        - batch_size: Split X and y into batches of this size to avoid using
          too much memory.
        Returns:
        - acc: Scalar giving the fraction of instances that were correctly
          classified by the model.
        """
        # Maybe subsample the data.
        # NOTE(review): this subsample also uses choice with replacement, so
        # the reported accuracy is over a sample that may repeat points.
        N = X.shape[0]
        if num_samples is not None and N > num_samples:
            mask = np.random.choice(N, num_samples)
            N = num_samples
            X = X[mask]
            y = y[mask]
        # Compute predictions in batches
        num_batches = N // batch_size
        if N % batch_size != 0:
            num_batches += 1
        y_pred = []
        for i in range(num_batches):
            start = i * batch_size
            end = (i + 1) * batch_size
            scores = self.model.loss(X[start:end])
            y_pred.append(np.argmax(scores, axis=1))
        y_pred = np.hstack(y_pred)
        acc = np.mean(y_pred == y)
        return acc
    def train(self):
        """
        Run optimization to train the model.
        """
        num_train = self.X_train.shape[0]
        iterations_per_epoch = max(num_train // self.batch_size, 1)
        num_iterations = self.num_epochs * iterations_per_epoch
        for t in range(num_iterations):
            self._step()
            # Maybe print training loss
            if self.verbose and t % self.print_every == 0:
                print('(Iteration %d / %d) loss: %f' % (
                    t + 1, num_iterations, self.loss_history[-1]))
            # At the end of every epoch, increment the epoch counter and decay
            # the learning rate.
            epoch_end = (t + 1) % iterations_per_epoch == 0
            if epoch_end:
                self.epoch += 1
                for k in self.optim_configs:
                    self.optim_configs[k]['learning_rate'] *= self.lr_decay
            # Check train and val accuracy on the first iteration, the last
            # iteration, and at the end of each epoch.
            first_it = (t == 0)
            last_it = (t == num_iterations - 1)
            if first_it or last_it or epoch_end:
                train_acc = self.check_accuracy(self.X_train, self.y_train,
                    num_samples=self.num_train_samples)
                val_acc = self.check_accuracy(self.X_val, self.y_val,
                    num_samples=self.num_val_samples)
                self.train_acc_history.append(train_acc)
                self.val_acc_history.append(val_acc)
                self._save_checkpoint()
                if self.verbose:
                    print('(Epoch %d / %d) train acc: %f; val_acc: %f' % (
                        self.epoch, self.num_epochs, train_acc, val_acc))
                # Keep track of the best model (copies so later updates
                # don't overwrite the saved best parameters).
                if val_acc > self.best_val_acc:
                    self.best_val_acc = val_acc
                    self.best_params = {}
                    for k, v in self.model.params.items():
                        self.best_params[k] = v.copy()
        # At the end of training swap the best params into the model
        self.model.params = self.best_params
| 39.583062 | 79 | 0.615454 |
384c5bfd0ae29d477b4d4d28208b5c19e9112c6b | 71,047 | py | Python | homeassistant/components/google_assistant/trait.py | edofullin/core | 106dc4d28ad59cb192c60fc7a354cafa86899ea4 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/google_assistant/trait.py | edofullin/core | 106dc4d28ad59cb192c60fc7a354cafa86899ea4 | [
"Apache-2.0"
] | 60 | 2020-07-06T15:10:30.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/google_assistant/trait.py | edofullin/core | 106dc4d28ad59cb192c60fc7a354cafa86899ea4 | [
"Apache-2.0"
] | 4 | 2017-01-10T04:17:33.000Z | 2021-09-02T16:37:24.000Z | """Implement the Google Smart Home traits."""
from __future__ import annotations
import logging
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
cover,
fan,
group,
input_boolean,
input_select,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.components.humidifier import const as humidifier
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_CODE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_MODE,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
CAST_APP_ID_HOMEASSISTANT,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_IDLE,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.helpers.network import get_url
from homeassistant.util import color as color_util, dt, temperature as temp_util
from .const import (
CHALLENGE_ACK_NEEDED,
CHALLENGE_FAILED_PIN_NEEDED,
CHALLENGE_PIN_NEEDED,
ERR_ALREADY_ARMED,
ERR_ALREADY_DISARMED,
ERR_ALREADY_STOPPED,
ERR_CHALLENGE_NOT_SETUP,
ERR_NOT_SUPPORTED,
ERR_UNSUPPORTED_INPUT,
ERR_VALUE_OUT_OF_RANGE,
)
from .error import ChallengeNeeded, SmartHomeError
_LOGGER = logging.getLogger(__name__)

# Google Smart Home trait identifiers, as sent in SYNC responses.
# See https://developers.google.com/assistant/smarthome/traits
PREFIX_TRAITS = "action.devices.traits."
TRAIT_CAMERA_STREAM = f"{PREFIX_TRAITS}CameraStream"
TRAIT_ONOFF = f"{PREFIX_TRAITS}OnOff"
TRAIT_DOCK = f"{PREFIX_TRAITS}Dock"
TRAIT_STARTSTOP = f"{PREFIX_TRAITS}StartStop"
TRAIT_BRIGHTNESS = f"{PREFIX_TRAITS}Brightness"
TRAIT_COLOR_SETTING = f"{PREFIX_TRAITS}ColorSetting"
TRAIT_SCENE = f"{PREFIX_TRAITS}Scene"
TRAIT_TEMPERATURE_SETTING = f"{PREFIX_TRAITS}TemperatureSetting"
TRAIT_LOCKUNLOCK = f"{PREFIX_TRAITS}LockUnlock"
TRAIT_FANSPEED = f"{PREFIX_TRAITS}FanSpeed"
TRAIT_MODES = f"{PREFIX_TRAITS}Modes"
TRAIT_INPUTSELECTOR = f"{PREFIX_TRAITS}InputSelector"
TRAIT_OPENCLOSE = f"{PREFIX_TRAITS}OpenClose"
TRAIT_VOLUME = f"{PREFIX_TRAITS}Volume"
TRAIT_ARMDISARM = f"{PREFIX_TRAITS}ArmDisarm"
TRAIT_HUMIDITY_SETTING = f"{PREFIX_TRAITS}HumiditySetting"
TRAIT_TRANSPORT_CONTROL = f"{PREFIX_TRAITS}TransportControl"
TRAIT_MEDIA_STATE = f"{PREFIX_TRAITS}MediaState"

# Google Smart Home command identifiers, as received in EXECUTE intents.
PREFIX_COMMANDS = "action.devices.commands."
COMMAND_ONOFF = f"{PREFIX_COMMANDS}OnOff"
COMMAND_GET_CAMERA_STREAM = f"{PREFIX_COMMANDS}GetCameraStream"
COMMAND_DOCK = f"{PREFIX_COMMANDS}Dock"
COMMAND_STARTSTOP = f"{PREFIX_COMMANDS}StartStop"
COMMAND_PAUSEUNPAUSE = f"{PREFIX_COMMANDS}PauseUnpause"
COMMAND_BRIGHTNESS_ABSOLUTE = f"{PREFIX_COMMANDS}BrightnessAbsolute"
COMMAND_COLOR_ABSOLUTE = f"{PREFIX_COMMANDS}ColorAbsolute"
COMMAND_ACTIVATE_SCENE = f"{PREFIX_COMMANDS}ActivateScene"
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = (
    f"{PREFIX_COMMANDS}ThermostatTemperatureSetpoint"
)
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = (
    f"{PREFIX_COMMANDS}ThermostatTemperatureSetRange"
)
COMMAND_THERMOSTAT_SET_MODE = f"{PREFIX_COMMANDS}ThermostatSetMode"
COMMAND_LOCKUNLOCK = f"{PREFIX_COMMANDS}LockUnlock"
COMMAND_FANSPEED = f"{PREFIX_COMMANDS}SetFanSpeed"
COMMAND_MODES = f"{PREFIX_COMMANDS}SetModes"
COMMAND_INPUT = f"{PREFIX_COMMANDS}SetInput"
COMMAND_NEXT_INPUT = f"{PREFIX_COMMANDS}NextInput"
COMMAND_PREVIOUS_INPUT = f"{PREFIX_COMMANDS}PreviousInput"
COMMAND_OPENCLOSE = f"{PREFIX_COMMANDS}OpenClose"
COMMAND_OPENCLOSE_RELATIVE = f"{PREFIX_COMMANDS}OpenCloseRelative"
COMMAND_SET_VOLUME = f"{PREFIX_COMMANDS}setVolume"
COMMAND_VOLUME_RELATIVE = f"{PREFIX_COMMANDS}volumeRelative"
COMMAND_MUTE = f"{PREFIX_COMMANDS}mute"
COMMAND_ARMDISARM = f"{PREFIX_COMMANDS}ArmDisarm"
COMMAND_MEDIA_NEXT = f"{PREFIX_COMMANDS}mediaNext"
COMMAND_MEDIA_PAUSE = f"{PREFIX_COMMANDS}mediaPause"
COMMAND_MEDIA_PREVIOUS = f"{PREFIX_COMMANDS}mediaPrevious"
COMMAND_MEDIA_RESUME = f"{PREFIX_COMMANDS}mediaResume"
COMMAND_MEDIA_SEEK_RELATIVE = f"{PREFIX_COMMANDS}mediaSeekRelative"
COMMAND_MEDIA_SEEK_TO_POSITION = f"{PREFIX_COMMANDS}mediaSeekToPosition"
COMMAND_MEDIA_SHUFFLE = f"{PREFIX_COMMANDS}mediaShuffle"
COMMAND_MEDIA_STOP = f"{PREFIX_COMMANDS}mediaStop"
COMMAND_SET_HUMIDITY = f"{PREFIX_COMMANDS}SetHumidity"

# Registry of all trait classes, populated by the @register_trait decorator.
TRAITS = []
def register_trait(trait):
    """Class decorator adding a trait class to the global TRAITS registry.

    Returns the class unchanged so it can be used directly as a decorator.
    """
    TRAITS.append(trait)
    return trait
def _google_temp_unit(units):
    """Translate a Home Assistant temperature unit into Google's letter code."""
    return "F" if units == TEMP_FAHRENHEIT else "C"
def _next_selected(items: list[str], selected: str | None) -> str | None:
"""Return the next item in a item list starting at given value.
If selected is missing in items, None is returned
"""
try:
index = items.index(selected)
except ValueError:
return None
next_item = 0 if index == len(items) - 1 else index + 1
return items[next_item]
class _Trait:
    """Represents a Trait inside Google Assistant skill."""
    # Google command identifiers this trait can execute; overridden by subclasses.
    commands = []
    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA.

        Base implementation never requires two-factor confirmation.
        """
        return False
    def __init__(self, hass, state, config):
        """Initialize a trait for a state.

        - hass: the Home Assistant instance used for service calls.
        - state: the entity state this trait instance wraps.
        - config: the Google Assistant integration configuration.
        """
        self.hass = hass
        self.state = state
        self.config = config
    def sync_attributes(self):
        """Return attributes for a sync request."""
        # Subclasses must implement.
        raise NotImplementedError
    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        # Subclasses must implement.
        raise NotImplementedError
    def can_execute(self, command, params):
        """Test if command can be executed."""
        return command in self.commands
    async def execute(self, command, data, params, challenge):
        """Execute a trait command."""
        # Subclasses must implement.
        raise NotImplementedError
@register_trait
class BrightnessTrait(_Trait):
    """Trait to control brightness of a device.

    https://developers.google.com/actions/smarthome/traits/brightness
    """

    name = TRAIT_BRIGHTNESS
    commands = [COMMAND_BRIGHTNESS_ABSOLUTE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == light.DOMAIN and features & light.SUPPORT_BRIGHTNESS

    def sync_attributes(self):
        """Return brightness attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return brightness query attributes."""
        response = {}
        if self.state.domain == light.DOMAIN:
            brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS)
            # Convert HA's 0-255 brightness to Google's 0-100 percentage.
            response["brightness"] = (
                int(100 * (brightness / 255)) if brightness is not None else 0
            )
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a brightness command."""
        if self.state.domain == light.DOMAIN:
            service_data = {
                ATTR_ENTITY_ID: self.state.entity_id,
                light.ATTR_BRIGHTNESS_PCT: params["brightness"],
            }
            await self.hass.services.async_call(
                light.DOMAIN,
                light.SERVICE_TURN_ON,
                service_data,
                blocking=True,
                context=data.context,
            )
@register_trait
class CameraStreamTrait(_Trait):
    """Trait to stream from cameras.
    https://developers.google.com/actions/smarthome/traits/camerastream
    """
    name = TRAIT_CAMERA_STREAM
    commands = [COMMAND_GET_CAMERA_STREAM]
    # Cached stream payload; set on the instance by execute() and then
    # returned from query_attributes().
    stream_info = None
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == camera.DOMAIN:
            return features & camera.SUPPORT_STREAM
        return False
    def sync_attributes(self):
        """Return stream attributes for a sync request."""
        # Only HLS is offered; no auth token or DRM is used for the URL.
        return {
            "cameraStreamSupportedProtocols": ["hls"],
            "cameraStreamNeedAuthToken": False,
            "cameraStreamNeedDrmEncryption": False,
        }
    def query_attributes(self):
        """Return camera stream attributes."""
        return self.stream_info or {}
    async def execute(self, command, data, params, challenge):
        """Execute a get camera stream command.

        Requests an HLS stream for this entity and stores the absolute URL
        (built from the instance's external/internal URL) for later queries.
        """
        url = await self.hass.components.camera.async_request_stream(
            self.state.entity_id, "hls"
        )
        self.stream_info = {
            "cameraStreamAccessUrl": f"{get_url(self.hass)}{url}",
            "cameraStreamReceiverAppId": CAST_APP_ID_HOMEASSISTANT,
        }
@register_trait
class OnOffTrait(_Trait):
    """Trait to offer basic on and off functionality.

    https://developers.google.com/actions/smarthome/traits/onoff
    """

    name = TRAIT_ONOFF
    commands = [COMMAND_ONOFF]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain in (
            group.DOMAIN,
            input_boolean.DOMAIN,
            switch.DOMAIN,
            fan.DOMAIN,
            light.DOMAIN,
            media_player.DOMAIN,
            humidifier.DOMAIN,
        )

    def sync_attributes(self):
        """Return OnOff attributes for a sync request."""
        # Assumed-state entities cannot report a reliable on/off state,
        # so tell Google the trait is command-only.
        if self.state.attributes.get(ATTR_ASSUMED_STATE, False):
            return {"commandOnlyOnOff": True}
        return {}

    def query_attributes(self):
        """Return OnOff query attributes."""
        return {"on": self.state.state not in (STATE_OFF, STATE_UNKNOWN)}

    async def execute(self, command, data, params, challenge):
        """Execute an OnOff command."""
        domain = self.state.domain
        # Groups are switched via the homeassistant domain so the call fans
        # out to each member entity; all other domains handle their own
        # turn_on/turn_off services. (The service choice itself is the same
        # either way, so it is computed once instead of in both branches.)
        service_domain = HA_DOMAIN if domain == group.DOMAIN else domain
        service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF

        await self.hass.services.async_call(
            service_domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ColorSettingTrait(_Trait):
    """Trait to offer color temperature functionality.
    https://developers.google.com/actions/smarthome/traits/colortemperature
    """
    name = TRAIT_COLOR_SETTING
    commands = [COMMAND_COLOR_ABSOLUTE]
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain != light.DOMAIN:
            return False
        return features & light.SUPPORT_COLOR_TEMP or features & light.SUPPORT_COLOR
    def sync_attributes(self):
        """Return color temperature attributes for a sync request."""
        attrs = self.state.attributes
        features = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
        response = {}
        if features & light.SUPPORT_COLOR:
            response["colorModel"] = "hsv"
        if features & light.SUPPORT_COLOR_TEMP:
            # Max Kelvin is Min Mireds K = 1000000 / mireds
            # Min Kelvin is Max Mireds K = 1000000 / mireds
            response["colorTemperatureRange"] = {
                "temperatureMaxK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MIN_MIREDS)
                ),
                "temperatureMinK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MAX_MIREDS)
                ),
            }
        return response
    def query_attributes(self):
        """Return color temperature query attributes."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        color = {}
        if features & light.SUPPORT_COLOR:
            color_hs = self.state.attributes.get(light.ATTR_HS_COLOR)
            brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1)
            if color_hs is not None:
                # Scale HA's 0-100 saturation and 0-255 brightness into the
                # 0-1 range Google expects; hue is passed through unchanged.
                color["spectrumHsv"] = {
                    "hue": color_hs[0],
                    "saturation": color_hs[1] / 100,
                    "value": brightness / 255,
                }
        if features & light.SUPPORT_COLOR_TEMP:
            temp = self.state.attributes.get(light.ATTR_COLOR_TEMP)
            # Some faulty integrations might put 0 in here, raising exception.
            if temp == 0:
                _LOGGER.warning(
                    "Entity %s has incorrect color temperature %s",
                    self.state.entity_id,
                    temp,
                )
            elif temp is not None:
                color["temperatureK"] = color_util.color_temperature_mired_to_kelvin(
                    temp
                )
        response = {}
        if color:
            response["color"] = color
        return response
    async def execute(self, command, data, params, challenge):
        """Execute a color temperature command.

        Handles the three mutually exclusive payload shapes Google may send
        inside params["color"]: "temperature" (Kelvin), "spectrumRGB"
        (packed integer RGB), or "spectrumHSV".
        """
        if "temperature" in params["color"]:
            temp = color_util.color_temperature_kelvin_to_mired(
                params["color"]["temperature"]
            )
            min_temp = self.state.attributes[light.ATTR_MIN_MIREDS]
            max_temp = self.state.attributes[light.ATTR_MAX_MIREDS]
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_COLOR_TEMP: temp},
                blocking=True,
                context=data.context,
            )
        elif "spectrumRGB" in params["color"]:
            # Convert integer to hex format and left pad with 0's till length 6
            hex_value = f"{params['color']['spectrumRGB']:06x}"
            color = color_util.color_RGB_to_hs(
                *color_util.rgb_hex_to_rgb_list(hex_value)
            )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: color},
                blocking=True,
                context=data.context,
            )
        elif "spectrumHSV" in params["color"]:
            color = params["color"]["spectrumHSV"]
            # Inverse of the query_attributes scaling: Google's 0-1 values
            # back to HA's 0-100 saturation and 0-255 brightness.
            saturation = color["saturation"] * 100
            brightness = color["value"] * 255
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_HS_COLOR: [color["hue"], saturation],
                    light.ATTR_BRIGHTNESS: brightness,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class SceneTrait(_Trait):
    """Trait to offer scene functionality.

    https://developers.google.com/actions/smarthome/traits/scene
    """

    name = TRAIT_SCENE
    commands = [COMMAND_ACTIVATE_SCENE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == scene.DOMAIN or domain == script.DOMAIN

    def sync_attributes(self):
        """Return scene attributes for a sync request."""
        # Neither supported domain can support sceneReversible.
        return {}

    def query_attributes(self):
        """Return scene query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a scene command."""
        # Scripts can run for a long time, so only block for scenes.
        should_block = self.state.domain != script.DOMAIN
        await self.hass.services.async_call(
            self.state.domain,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=should_block,
            context=data.context,
        )
@register_trait
class DockTrait(_Trait):
    """Trait to offer dock functionality.

    https://developers.google.com/actions/smarthome/traits/dock
    """

    name = TRAIT_DOCK
    commands = [COMMAND_DOCK]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN

    def sync_attributes(self):
        """Return dock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return dock query attributes."""
        docked = self.state.state == vacuum.STATE_DOCKED
        return {"isDocked": docked}

    async def execute(self, command, data, params, challenge):
        """Execute a dock command by sending the vacuum back to its base."""
        service_data = {ATTR_ENTITY_ID: self.state.entity_id}
        await self.hass.services.async_call(
            self.state.domain,
            vacuum.SERVICE_RETURN_TO_BASE,
            service_data,
            blocking=True,
            context=data.context,
        )
@register_trait
class StartStopTrait(_Trait):
    """Trait to offer StartStop functionality.
    https://developers.google.com/actions/smarthome/traits/startstop
    """
    name = TRAIT_STARTSTOP
    commands = [COMMAND_STARTSTOP, COMMAND_PAUSEUNPAUSE]
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == vacuum.DOMAIN:
            return True
        # Covers only qualify when they can be stopped mid-motion.
        if domain == cover.DOMAIN and features & cover.SUPPORT_STOP:
            return True
        return False
    def sync_attributes(self):
        """Return StartStop attributes for a sync request."""
        domain = self.state.domain
        if domain == vacuum.DOMAIN:
            # Only vacuums that support pausing are advertised as pausable.
            return {
                "pausable": self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
                & vacuum.SUPPORT_PAUSE
                != 0
            }
        if domain == cover.DOMAIN:
            return {}
    def query_attributes(self):
        """Return StartStop query attributes."""
        domain = self.state.domain
        state = self.state.state
        if domain == vacuum.DOMAIN:
            return {
                "isRunning": state == vacuum.STATE_CLEANING,
                "isPaused": state == vacuum.STATE_PAUSED,
            }
        if domain == cover.DOMAIN:
            # A cover counts as "running" while it is actively moving.
            return {"isRunning": state in (cover.STATE_CLOSING, cover.STATE_OPENING)}
    async def execute(self, command, data, params, challenge):
        """Execute a StartStop command by dispatching on the entity domain."""
        domain = self.state.domain
        if domain == vacuum.DOMAIN:
            return await self._execute_vacuum(command, data, params, challenge)
        if domain == cover.DOMAIN:
            return await self._execute_cover(command, data, params, challenge)
    async def _execute_vacuum(self, command, data, params, challenge):
        """Execute a StartStop command for a vacuum entity."""
        if command == COMMAND_STARTSTOP:
            if params["start"]:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_START,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
            else:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_STOP,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
        elif command == COMMAND_PAUSEUNPAUSE:
            if params["pause"]:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_PAUSE,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
            else:
                # "Unpause" resumes cleaning via the start service.
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_START,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
    async def _execute_cover(self, command, data, params, challenge):
        """Execute a StartStop command for a cover entity.

        Only stopping (start=False) is supported; any other request raises
        a SmartHomeError that is reported back to Google.
        """
        if command == COMMAND_STARTSTOP:
            if params["start"] is False:
                # Assumed-state covers are always sent the stop call since
                # their reported state cannot be trusted.
                if (
                    self.state.state
                    in (
                        cover.STATE_CLOSING,
                        cover.STATE_OPENING,
                    )
                    or self.state.attributes.get(ATTR_ASSUMED_STATE)
                ):
                    await self.hass.services.async_call(
                        self.state.domain,
                        cover.SERVICE_STOP_COVER,
                        {ATTR_ENTITY_ID: self.state.entity_id},
                        blocking=True,
                        context=data.context,
                    )
                else:
                    raise SmartHomeError(
                        ERR_ALREADY_STOPPED, "Cover is already stopped"
                    )
            else:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Starting a cover is not supported"
                )
        else:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, f"Command {command} is not supported"
            )
@register_trait
class TemperatureSettingTrait(_Trait):
"""Trait to offer handling both temperature point and modes functionality.
https://developers.google.com/actions/smarthome/traits/temperaturesetting
"""
name = TRAIT_TEMPERATURE_SETTING
commands = [
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
COMMAND_THERMOSTAT_SET_MODE,
]
# We do not support "on" as we are unable to know how to restore
# the last mode.
hvac_to_google = {
climate.HVAC_MODE_HEAT: "heat",
climate.HVAC_MODE_COOL: "cool",
climate.HVAC_MODE_OFF: "off",
climate.HVAC_MODE_AUTO: "auto",
climate.HVAC_MODE_HEAT_COOL: "heatcool",
climate.HVAC_MODE_FAN_ONLY: "fan-only",
climate.HVAC_MODE_DRY: "dry",
}
google_to_hvac = {value: key for key, value in hvac_to_google.items()}
preset_to_google = {climate.PRESET_ECO: "eco"}
google_to_preset = {value: key for key, value in preset_to_google.items()}
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain == climate.DOMAIN:
return True
return (
domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_TEMPERATURE
)
@property
def climate_google_modes(self):
"""Return supported Google modes."""
modes = []
attrs = self.state.attributes
for mode in attrs.get(climate.ATTR_HVAC_MODES, []):
google_mode = self.hvac_to_google.get(mode)
if google_mode and google_mode not in modes:
modes.append(google_mode)
for preset in attrs.get(climate.ATTR_PRESET_MODES, []):
google_mode = self.preset_to_google.get(preset)
if google_mode and google_mode not in modes:
modes.append(google_mode)
return modes
def sync_attributes(self):
"""Return temperature point and modes attributes for a sync request."""
response = {}
attrs = self.state.attributes
domain = self.state.domain
response["thermostatTemperatureUnit"] = _google_temp_unit(
self.hass.config.units.temperature_unit
)
if domain == sensor.DOMAIN:
device_class = attrs.get(ATTR_DEVICE_CLASS)
if device_class == sensor.DEVICE_CLASS_TEMPERATURE:
response["queryOnlyTemperatureSetting"] = True
elif domain == climate.DOMAIN:
modes = self.climate_google_modes
# Some integrations don't support modes (e.g. opentherm), but Google doesn't
# support changing the temperature if we don't have any modes. If there's
# only one Google doesn't support changing it, so the default mode here is
# only cosmetic.
if len(modes) == 0:
modes.append("heat")
if "off" in modes and any(
mode in modes for mode in ("heatcool", "heat", "cool")
):
modes.append("on")
response["availableThermostatModes"] = modes
return response
def query_attributes(self):
"""Return temperature point and modes query attributes."""
response = {}
attrs = self.state.attributes
domain = self.state.domain
unit = self.hass.config.units.temperature_unit
if domain == sensor.DOMAIN:
device_class = attrs.get(ATTR_DEVICE_CLASS)
if device_class == sensor.DEVICE_CLASS_TEMPERATURE:
current_temp = self.state.state
if current_temp not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
response["thermostatTemperatureAmbient"] = round(
temp_util.convert(float(current_temp), unit, TEMP_CELSIUS), 1
)
elif domain == climate.DOMAIN:
operation = self.state.state
preset = attrs.get(climate.ATTR_PRESET_MODE)
supported = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
if preset in self.preset_to_google:
response["thermostatMode"] = self.preset_to_google[preset]
else:
response["thermostatMode"] = self.hvac_to_google.get(operation)
current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE)
if current_temp is not None:
response["thermostatTemperatureAmbient"] = round(
temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1
)
current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY)
if current_humidity is not None:
response["thermostatHumidityAmbient"] = current_humidity
if operation in (climate.HVAC_MODE_AUTO, climate.HVAC_MODE_HEAT_COOL):
if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
response["thermostatTemperatureSetpointHigh"] = round(
temp_util.convert(
attrs[climate.ATTR_TARGET_TEMP_HIGH], unit, TEMP_CELSIUS
),
1,
)
response["thermostatTemperatureSetpointLow"] = round(
temp_util.convert(
attrs[climate.ATTR_TARGET_TEMP_LOW], unit, TEMP_CELSIUS
),
1,
)
else:
target_temp = attrs.get(ATTR_TEMPERATURE)
if target_temp is not None:
target_temp = round(
temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
)
response["thermostatTemperatureSetpointHigh"] = target_temp
response["thermostatTemperatureSetpointLow"] = target_temp
else:
target_temp = attrs.get(ATTR_TEMPERATURE)
if target_temp is not None:
response["thermostatTemperatureSetpoint"] = round(
temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
)
return response
    async def execute(self, command, data, params, challenge):
        """Execute a temperature point or mode command.

        Raises SmartHomeError for sensor entities (query-only) and for
        setpoints outside the entity's min/max temperature range.
        """
        domain = self.state.domain
        # Temperature sensors are query-only; they expose no services.
        if domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )
        # All sent in temperatures are always in Celsius
        unit = self.hass.config.units.temperature_unit
        min_temp = self.state.attributes[climate.ATTR_MIN_TEMP]
        max_temp = self.state.attributes[climate.ATTR_MAX_TEMP]
        if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT:
            # Convert the Celsius payload into the configured HA unit,
            # then validate against the entity's own min/max.
            temp = temp_util.convert(
                params["thermostatTemperatureSetpoint"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp = round(temp)
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                {ATTR_ENTITY_ID: self.state.entity_id, ATTR_TEMPERATURE: temp},
                blocking=True,
                context=data.context,
            )
        elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE:
            # Validate the upper bound of the requested range.
            temp_high = temp_util.convert(
                params["thermostatTemperatureSetpointHigh"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_high = round(temp_high)
            if temp_high < min_temp or temp_high > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Upper bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )
            # Validate the lower bound of the requested range.
            temp_low = temp_util.convert(
                params["thermostatTemperatureSetpointLow"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_low = round(temp_low)
            if temp_low < min_temp or temp_low > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Lower bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )
            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
            svc_data = {ATTR_ENTITY_ID: self.state.entity_id}
            if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
                svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
            else:
                # Entity has no range support: collapse the requested
                # range to its midpoint as a single setpoint.
                svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                svc_data,
                blocking=True,
                context=data.context,
            )
        elif command == COMMAND_THERMOSTAT_SET_MODE:
            target_mode = params["thermostatMode"]
            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
            # "on"/"off" are mapped onto turn_on/turn_off services rather
            # than an HVAC mode change.
            if target_mode == "on":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_ON,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return
            if target_mode == "off":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_OFF,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return
            # Google modes that map to HA presets take precedence over
            # HVAC modes.
            if target_mode in self.google_to_preset:
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    climate.SERVICE_SET_PRESET_MODE,
                    {
                        climate.ATTR_PRESET_MODE: self.google_to_preset[target_mode],
                        ATTR_ENTITY_ID: self.state.entity_id,
                    },
                    blocking=True,
                    context=data.context,
                )
                return
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_HVAC_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    climate.ATTR_HVAC_MODE: self.google_to_hvac[target_mode],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class HumiditySettingTrait(_Trait):
    """Trait to offer humidity setting functionality.
    https://developers.google.com/actions/smarthome/traits/humiditysetting
    """
    name = TRAIT_HUMIDITY_SETTING
    commands = [COMMAND_SET_HUMIDITY]
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        # Humidifiers always qualify; sensors only with humidity class.
        return domain == humidifier.DOMAIN or (
            domain == sensor.DOMAIN
            and device_class == sensor.DEVICE_CLASS_HUMIDITY
        )
    def sync_attributes(self):
        """Return humidity attributes for a sync request."""
        state = self.state
        result = {}
        if state.domain == sensor.DOMAIN:
            # Humidity sensors can only be queried, never commanded.
            if state.attributes.get(ATTR_DEVICE_CLASS) == sensor.DEVICE_CLASS_HUMIDITY:
                result["queryOnlyHumiditySetting"] = True
        elif state.domain == humidifier.DOMAIN:
            result["humiditySetpointRange"] = {
                "minPercent": round(
                    float(state.attributes[humidifier.ATTR_MIN_HUMIDITY])
                ),
                "maxPercent": round(
                    float(state.attributes[humidifier.ATTR_MAX_HUMIDITY])
                ),
            }
        return result
    def query_attributes(self):
        """Return humidity query attributes."""
        state = self.state
        result = {}
        if state.domain == sensor.DOMAIN:
            if state.attributes.get(ATTR_DEVICE_CLASS) == sensor.DEVICE_CLASS_HUMIDITY:
                reading = state.state
                # Skip entities with no usable reading.
                if reading not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
                    result["humidityAmbientPercent"] = round(float(reading))
        elif state.domain == humidifier.DOMAIN:
            setpoint = state.attributes.get(humidifier.ATTR_HUMIDITY)
            if setpoint is not None:
                result["humiditySetpointPercent"] = round(float(setpoint))
        return result
    async def execute(self, command, data, params, challenge):
        """Execute a humidity command."""
        # Sensors are query-only; they expose no services.
        if self.state.domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )
        if command == COMMAND_SET_HUMIDITY:
            await self.hass.services.async_call(
                humidifier.DOMAIN,
                humidifier.SERVICE_SET_HUMIDITY,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    humidifier.ATTR_HUMIDITY: params["humidity"],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class LockUnlockTrait(_Trait):
    """Trait to lock or unlock a lock.
    https://developers.google.com/actions/smarthome/traits/lockunlock
    """
    name = TRAIT_LOCKUNLOCK
    commands = [COMMAND_LOCKUNLOCK]
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == lock.DOMAIN
    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True
    def sync_attributes(self):
        """Return LockUnlock attributes for a sync request."""
        return {}
    def query_attributes(self):
        """Return LockUnlock query attributes."""
        return {"isLocked": self.state.state == STATE_LOCKED}
    async def execute(self, command, data, params, challenge):
        """Execute an LockUnlock command."""
        wants_lock = bool(params["lock"])
        if not wants_lock:
            # Unlocking is the security-sensitive direction; this may
            # raise ChallengeNeeded until the pin is supplied.
            _verify_pin_challenge(data, self.state, challenge)
        await self.hass.services.async_call(
            lock.DOMAIN,
            lock.SERVICE_LOCK if wants_lock else lock.SERVICE_UNLOCK,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ArmDisArmTrait(_Trait):
    """Trait to Arm or Disarm a Security System.
    https://developers.google.com/actions/smarthome/traits/armdisarm
    """
    name = TRAIT_ARMDISARM
    commands = [COMMAND_ARMDISARM]
    # Alarm state -> service that transitions the panel into that state.
    state_to_service = {
        STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: SERVICE_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: SERVICE_ALARM_TRIGGER,
    }
    # Alarm state -> SUPPORT_* feature bit required to reach that state.
    state_to_support = {
        STATE_ALARM_ARMED_HOME: alarm_control_panel.const.SUPPORT_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: alarm_control_panel.const.SUPPORT_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: alarm_control_panel.const.SUPPORT_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: alarm_control_panel.const.SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: alarm_control_panel.const.SUPPORT_ALARM_TRIGGER,
    }
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == alarm_control_panel.DOMAIN
    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True
    def _supported_states(self):
        """Return the arm states this panel's feature flags allow."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        return [
            state
            for state, required_feature in self.state_to_support.items()
            if features & required_feature != 0
        ]
    def sync_attributes(self):
        """Return ArmDisarm attributes for a sync request."""
        response = {}
        levels = []
        for state in self._supported_states():
            # level synonyms are generated from state names
            # 'armed_away' becomes 'armed away' or 'away'
            level_synonym = [state.replace("_", " ")]
            if state != STATE_ALARM_TRIGGERED:
                level_synonym.append(state.split("_")[1])
            level = {
                "level_name": state,
                "level_values": [{"level_synonym": level_synonym, "lang": "en"}],
            }
            levels.append(level)
        response["availableArmLevels"] = {"levels": levels, "ordered": False}
        return response
    def query_attributes(self):
        """Return ArmDisarm query attributes."""
        # Prefer a pending target state when the panel exposes one.
        if "next_state" in self.state.attributes:
            armed_state = self.state.attributes["next_state"]
        else:
            armed_state = self.state.state
        response = {"isArmed": armed_state in self.state_to_service}
        if response["isArmed"]:
            response.update({"currentArmLevel": armed_state})
        return response
    async def execute(self, command, data, params, challenge):
        """Execute an ArmDisarm command.

        Raises SmartHomeError when an arm level is missing/ambiguous or
        the panel is already in the requested state.
        """
        if params["arm"] and not params.get("cancel"):
            arm_level = params.get("armLevel")
            # If no arm level given, we can only arm it if there is
            # only one supported arm type. We never default to triggered.
            if not arm_level:
                states = self._supported_states()
                if STATE_ALARM_TRIGGERED in states:
                    states.remove(STATE_ALARM_TRIGGERED)
                if len(states) != 1:
                    raise SmartHomeError(ERR_NOT_SUPPORTED, "ArmLevel missing")
                arm_level = states[0]
            if self.state.state == arm_level:
                raise SmartHomeError(ERR_ALREADY_ARMED, "System is already armed")
            # Pin verification only when the panel requires a code to arm.
            if self.state.attributes["code_arm_required"]:
                _verify_pin_challenge(data, self.state, challenge)
            service = self.state_to_service[arm_level]
        # disarm the system without asking for code when
        # 'cancel' arming action is received while current status is pending
        elif (
            params["arm"]
            and params.get("cancel")
            and self.state.state == STATE_ALARM_PENDING
        ):
            service = SERVICE_ALARM_DISARM
        else:
            if self.state.state == STATE_ALARM_DISARMED:
                raise SmartHomeError(ERR_ALREADY_DISARMED, "System is already disarmed")
            # Disarming is always pin-protected (subject to should_2fa).
            _verify_pin_challenge(data, self.state, challenge)
            service = SERVICE_ALARM_DISARM
        await self.hass.services.async_call(
            alarm_control_panel.DOMAIN,
            service,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                ATTR_CODE: data.config.secure_devices_pin,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class FanSpeedTrait(_Trait):
    """Trait to control speed of Fan.
    https://developers.google.com/actions/smarthome/traits/fanspeed
    """
    name = TRAIT_FANSPEED
    commands = [COMMAND_FANSPEED]
    # Named fan speed -> English synonyms advertised to Google.
    speed_synonyms = {
        fan.SPEED_OFF: ["stop", "off"],
        fan.SPEED_LOW: ["slow", "low", "slowest", "lowest"],
        fan.SPEED_MEDIUM: ["medium", "mid", "middle"],
        fan.SPEED_HIGH: ["high", "max", "fast", "highest", "fastest", "maximum"],
    }
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == fan.DOMAIN:
            return features & fan.SUPPORT_SET_SPEED
        if domain == climate.DOMAIN:
            return features & climate.SUPPORT_FAN_MODE
        return False
    def sync_attributes(self):
        """Return speed point and modes attributes for a sync request."""
        domain = self.state.domain
        speeds = []
        reversible = False
        if domain == fan.DOMAIN:
            modes = self.state.attributes.get(fan.ATTR_SPEED_LIST, [])
            for mode in modes:
                # Only speeds with known synonyms are advertised.
                if mode not in self.speed_synonyms:
                    continue
                speed = {
                    "speed_name": mode,
                    "speed_values": [
                        {"speed_synonym": self.speed_synonyms.get(mode), "lang": "en"}
                    ],
                }
                speeds.append(speed)
            reversible = bool(
                self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
                & fan.SUPPORT_DIRECTION
            )
        elif domain == climate.DOMAIN:
            # Climate fan modes are advertised verbatim, each being its
            # own (only) synonym.
            modes = self.state.attributes.get(climate.ATTR_FAN_MODES, [])
            for mode in modes:
                speed = {
                    "speed_name": mode,
                    "speed_values": [{"speed_synonym": [mode], "lang": "en"}],
                }
                speeds.append(speed)
        return {
            "availableFanSpeeds": {"speeds": speeds, "ordered": True},
            "reversible": reversible,
            "supportsFanSpeedPercent": True,
        }
    def query_attributes(self):
        """Return speed point and modes query attributes."""
        attrs = self.state.attributes
        domain = self.state.domain
        response = {}
        if domain == climate.DOMAIN:
            speed = attrs.get(climate.ATTR_FAN_MODE)
            if speed is not None:
                response["currentFanSpeedSetting"] = speed
        if domain == fan.DOMAIN:
            speed = attrs.get(fan.ATTR_SPEED)
            percent = attrs.get(fan.ATTR_PERCENTAGE) or 0
            if speed is not None:
                response["on"] = speed != fan.SPEED_OFF
                response["currentFanSpeedSetting"] = speed
                response["currentFanSpeedPercent"] = percent
        return response
    async def execute(self, command, data, params, challenge):
        """Execute an SetFanSpeed command."""
        domain = self.state.domain
        if domain == climate.DOMAIN:
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_FAN_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    climate.ATTR_FAN_MODE: params["fanSpeed"],
                },
                blocking=True,
                context=data.context,
            )
        if domain == fan.DOMAIN:
            service_params = {
                ATTR_ENTITY_ID: self.state.entity_id,
            }
            # Prefer the percentage form when Google supplies one.
            if "fanSpeedPercent" in params:
                service = fan.SERVICE_SET_PERCENTAGE
                service_params[fan.ATTR_PERCENTAGE] = params["fanSpeedPercent"]
            else:
                service = fan.SERVICE_SET_SPEED
                service_params[fan.ATTR_SPEED] = params["fanSpeed"]
            await self.hass.services.async_call(
                fan.DOMAIN,
                service,
                service_params,
                blocking=True,
                context=data.context,
            )
@register_trait
class ModesTrait(_Trait):
    """Trait to set modes.
    https://developers.google.com/actions/smarthome/traits/modes
    """
    name = TRAIT_MODES
    commands = [COMMAND_MODES]
    # Mode/setting name -> English synonyms advertised to Google.
    SYNONYMS = {
        "sound mode": ["sound mode", "effects"],
        "option": ["option", "setting", "mode", "value"],
    }
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == input_select.DOMAIN:
            return True
        if domain == humidifier.DOMAIN and features & humidifier.SUPPORT_MODES:
            return True
        if domain == light.DOMAIN and features & light.SUPPORT_EFFECT:
            return True
        if domain != media_player.DOMAIN:
            return False
        return features & media_player.SUPPORT_SELECT_SOUND_MODE
    def _generate(self, name, settings):
        """Build one Google 'mode' dict from a name and its setting values."""
        mode = {
            "name": name,
            "name_values": [
                {"name_synonym": self.SYNONYMS.get(name, [name]), "lang": "en"}
            ],
            "settings": [],
            "ordered": False,
        }
        for setting in settings:
            mode["settings"].append(
                {
                    "setting_name": setting,
                    "setting_values": [
                        {
                            "setting_synonym": self.SYNONYMS.get(setting, [setting]),
                            "lang": "en",
                        }
                    ],
                }
            )
        return mode
    def sync_attributes(self):
        """Return mode attributes for a sync request."""
        modes = []
        # (domain, attribute holding the value list, Google mode name)
        for domain, attr, name in (
            (media_player.DOMAIN, media_player.ATTR_SOUND_MODE_LIST, "sound mode"),
            (input_select.DOMAIN, input_select.ATTR_OPTIONS, "option"),
            (humidifier.DOMAIN, humidifier.ATTR_AVAILABLE_MODES, "mode"),
            (light.DOMAIN, light.ATTR_EFFECT_LIST, "effect"),
        ):
            if self.state.domain != domain:
                continue
            items = self.state.attributes.get(attr)
            if items is not None:
                modes.append(self._generate(name, items))
            # Shortcut since all domains are currently unique
            break
        payload = {"availableModes": modes}
        return payload
    def query_attributes(self):
        """Return current modes."""
        attrs = self.state.attributes
        response = {}
        mode_settings = {}
        if self.state.domain == media_player.DOMAIN:
            if media_player.ATTR_SOUND_MODE_LIST in attrs:
                mode_settings["sound mode"] = attrs.get(media_player.ATTR_SOUND_MODE)
        elif self.state.domain == input_select.DOMAIN:
            # For input_select the entity state itself is the option.
            mode_settings["option"] = self.state.state
        elif self.state.domain == humidifier.DOMAIN:
            if ATTR_MODE in attrs:
                mode_settings["mode"] = attrs.get(ATTR_MODE)
        elif self.state.domain == light.DOMAIN and light.ATTR_EFFECT in attrs:
            mode_settings["effect"] = attrs.get(light.ATTR_EFFECT)
        if mode_settings:
            response["on"] = self.state.state not in (STATE_OFF, STATE_UNKNOWN)
            response["currentModeSettings"] = mode_settings
        return response
    async def execute(self, command, data, params, challenge):
        """Execute a SetModes command.

        Dispatches on the entity domain; each branch calls the matching
        domain service and returns.
        """
        settings = params.get("updateModeSettings")
        if self.state.domain == input_select.DOMAIN:
            option = params["updateModeSettings"]["option"]
            await self.hass.services.async_call(
                input_select.DOMAIN,
                input_select.SERVICE_SELECT_OPTION,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    input_select.ATTR_OPTION: option,
                },
                blocking=True,
                context=data.context,
            )
            return
        if self.state.domain == humidifier.DOMAIN:
            requested_mode = settings["mode"]
            await self.hass.services.async_call(
                humidifier.DOMAIN,
                humidifier.SERVICE_SET_MODE,
                {
                    ATTR_MODE: requested_mode,
                    ATTR_ENTITY_ID: self.state.entity_id,
                },
                blocking=True,
                context=data.context,
            )
            return
        if self.state.domain == light.DOMAIN:
            requested_effect = settings["effect"]
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_EFFECT: requested_effect,
                },
                blocking=True,
                context=data.context,
            )
            return
        if self.state.domain != media_player.DOMAIN:
            # Unknown domain: log and drop the command instead of raising.
            _LOGGER.info(
                "Received an Options command for unrecognised domain %s",
                self.state.domain,
            )
            return
        sound_mode = settings.get("sound mode")
        if sound_mode:
            await self.hass.services.async_call(
                media_player.DOMAIN,
                media_player.SERVICE_SELECT_SOUND_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    media_player.ATTR_SOUND_MODE: sound_mode,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class InputSelectorTrait(_Trait):
    """Trait to set modes.
    https://developers.google.com/assistant/smarthome/traits/inputselector
    """
    name = TRAIT_INPUTSELECTOR
    commands = [COMMAND_INPUT, COMMAND_NEXT_INPUT, COMMAND_PREVIOUS_INPUT]
    SYNONYMS = {}
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == media_player.DOMAIN and bool(
            features & media_player.SUPPORT_SELECT_SOURCE
        )
    def sync_attributes(self):
        """Return mode attributes for a sync request."""
        source_list = self.state.attributes.get(
            media_player.ATTR_INPUT_SOURCE_LIST, []
        )
        available = []
        for src in source_list:
            # Each source is its own (only) English synonym.
            available.append(
                {"key": src, "names": [{"name_synonym": [src], "lang": "en"}]}
            )
        return {"availableInputs": available, "orderedInputs": True}
    def query_attributes(self):
        """Return current modes."""
        current = self.state.attributes.get(media_player.ATTR_INPUT_SOURCE, "")
        return {"currentInput": current}
    async def execute(self, command, data, params, challenge):
        """Execute an SetInputSource command."""
        attrs = self.state.attributes
        sources = attrs.get(media_player.ATTR_INPUT_SOURCE_LIST) or []
        current = attrs.get(media_player.ATTR_INPUT_SOURCE)
        if command == COMMAND_INPUT:
            target = params.get("newInput")
        elif command == COMMAND_NEXT_INPUT:
            target = _next_selected(sources, current)
        elif command == COMMAND_PREVIOUS_INPUT:
            # Previous == next in the reversed source list.
            target = _next_selected(list(reversed(sources)), current)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Unsupported command")
        if target not in sources:
            raise SmartHomeError(ERR_UNSUPPORTED_INPUT, "Unsupported input")
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_SELECT_SOURCE,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_INPUT_SOURCE: target,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class OpenCloseTrait(_Trait):
    """Trait to open and close a cover.
    https://developers.google.com/actions/smarthome/traits/openclose
    """
    # Cover device classes that require 2FA
    COVER_2FA = (
        cover.DEVICE_CLASS_DOOR,
        cover.DEVICE_CLASS_GARAGE,
        cover.DEVICE_CLASS_GATE,
    )
    name = TRAIT_OPENCLOSE
    commands = [COMMAND_OPENCLOSE, COMMAND_OPENCLOSE_RELATIVE]
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == cover.DOMAIN:
            return True
        # Binary sensors qualify only for the classes that represent
        # something that opens and closes.
        return domain == binary_sensor.DOMAIN and device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_LOCK,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        )
    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA
    def sync_attributes(self):
        """Return opening direction."""
        response = {}
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if self.state.domain == binary_sensor.DOMAIN:
            # Binary sensors are query-only and report only 0% or 100%.
            response["queryOnlyOpenClose"] = True
            response["discreteOnlyOpenClose"] = True
        elif (
            self.state.domain == cover.DOMAIN
            and features & cover.SUPPORT_SET_POSITION == 0
        ):
            # No position support: only fully open / fully closed.
            response["discreteOnlyOpenClose"] = True
            if (
                features & cover.SUPPORT_OPEN == 0
                and features & cover.SUPPORT_CLOSE == 0
            ):
                response["queryOnlyOpenClose"] = True
        if self.state.attributes.get(ATTR_ASSUMED_STATE):
            response["commandOnlyOpenClose"] = True
        return response
    def query_attributes(self):
        """Return state query attributes."""
        domain = self.state.domain
        response = {}
        # When it's an assumed state, we will return empty state
        # This shouldn't happen because we set `commandOnlyOpenClose`
        # but Google still queries. Erroring here will cause device
        # to show up offline.
        if self.state.attributes.get(ATTR_ASSUMED_STATE):
            return response
        if domain == cover.DOMAIN:
            if self.state.state == STATE_UNKNOWN:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Querying state is not supported"
                )
            position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)
            if position is not None:
                response["openPercent"] = position
            elif self.state.state != cover.STATE_CLOSED:
                # Without a position attribute, any non-closed state is
                # reported as fully open.
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0
        elif domain == binary_sensor.DOMAIN:
            if self.state.state == STATE_ON:
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0
        return response
    async def execute(self, command, data, params, challenge):
        """Execute an Open, close, Set position command."""
        domain = self.state.domain
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if domain == cover.DOMAIN:
            svc_params = {ATTR_ENTITY_ID: self.state.entity_id}
            should_verify = False
            if command == COMMAND_OPENCLOSE_RELATIVE:
                position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)
                if position is None:
                    raise SmartHomeError(
                        ERR_NOT_SUPPORTED,
                        "Current position not know for relative command",
                    )
                # Clamp the resulting absolute position to 0-100.
                position = max(0, min(100, position + params["openRelativePercent"]))
            else:
                position = params["openPercent"]
            if position == 0:
                service = cover.SERVICE_CLOSE_COVER
                should_verify = False
            elif position == 100:
                service = cover.SERVICE_OPEN_COVER
                should_verify = True
            elif features & cover.SUPPORT_SET_POSITION:
                service = cover.SERVICE_SET_COVER_POSITION
                if position > 0:
                    should_verify = True
                svc_params[cover.ATTR_POSITION] = position
            else:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "No support for partial open close"
                )
            # Opening a door/garage/gate is security-sensitive; closing
            # never requires verification.
            if (
                should_verify
                and self.state.attributes.get(ATTR_DEVICE_CLASS)
                in OpenCloseTrait.COVER_2FA
            ):
                _verify_pin_challenge(data, self.state, challenge)
            await self.hass.services.async_call(
                cover.DOMAIN, service, svc_params, blocking=True, context=data.context
            )
@register_trait
class VolumeTrait(_Trait):
    """Trait to control volume of a device.
    https://developers.google.com/actions/smarthome/traits/volume
    """
    name = TRAIT_VOLUME
    commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE, COMMAND_MUTE]
    @staticmethod
    def supported(domain, features, device_class):
        """Test if trait is supported."""
        if domain == media_player.DOMAIN:
            # Either absolute set or stepwise adjustment is enough.
            return features & (
                media_player.SUPPORT_VOLUME_SET | media_player.SUPPORT_VOLUME_STEP
            )
        return False
    def sync_attributes(self):
        """Return volume attributes for a sync request."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        return {
            "volumeCanMuteAndUnmute": bool(features & media_player.SUPPORT_VOLUME_MUTE),
            "commandOnlyVolume": self.state.attributes.get(ATTR_ASSUMED_STATE, False),
            # Volume amounts in SET_VOLUME and VOLUME_RELATIVE are on a scale
            # from 0 to this value.
            "volumeMaxLevel": 100,
            # Default change for queries like "Hey Google, volume up".
            # 10% corresponds to the default behavior for the
            # media_player.volume{up,down} services.
            "levelStepSize": 10,
        }
    def query_attributes(self):
        """Return volume query attributes."""
        response = {}
        level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
        if level is not None:
            # Convert 0.0-1.0 to 0-100
            response["currentVolume"] = int(level * 100)
        muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
        if muted is not None:
            response["isMuted"] = bool(muted)
        return response
    async def _set_volume_absolute(self, data, level):
        """Call volume_set with an absolute 0.0-1.0 level."""
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_SET,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_LEVEL: level,
            },
            blocking=True,
            context=data.context,
        )
    async def _execute_set_volume(self, data, params):
        """Handle SetVolume: clamp to 0-100 and set the absolute level."""
        level = max(0, min(100, params["volumeLevel"]))
        if not (
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & media_player.SUPPORT_VOLUME_SET
        ):
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
        await self._set_volume_absolute(data, level / 100)
    async def _execute_volume_relative(self, data, params):
        """Handle volumeRelative: prefer absolute set, else step up/down."""
        relative = params["relativeSteps"]
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if features & media_player.SUPPORT_VOLUME_SET:
            current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
            if current is None:
                # A player may support volume_set but not report a level;
                # without it a relative target cannot be computed (adding
                # to None would raise TypeError).
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Current volume level is unknown"
                )
            target = max(0.0, min(1.0, current + relative / 100))
            await self._set_volume_absolute(data, target)
        elif features & media_player.SUPPORT_VOLUME_STEP:
            # Translate N relative steps into N volume_up/volume_down calls.
            svc = media_player.SERVICE_VOLUME_UP
            if relative < 0:
                svc = media_player.SERVICE_VOLUME_DOWN
                relative = -relative
            for _ in range(relative):
                await self.hass.services.async_call(
                    media_player.DOMAIN,
                    svc,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
    async def _execute_mute(self, data, params):
        """Handle mute/unmute; requires SUPPORT_VOLUME_MUTE."""
        mute = params["mute"]
        if not (
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & media_player.SUPPORT_VOLUME_MUTE
        ):
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_MUTE,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_MUTED: mute,
            },
            blocking=True,
            context=data.context,
        )
    async def execute(self, command, data, params, challenge):
        """Execute a volume command by dispatching to the matching helper."""
        if command == COMMAND_SET_VOLUME:
            await self._execute_set_volume(data, params)
        elif command == COMMAND_VOLUME_RELATIVE:
            await self._execute_volume_relative(data, params)
        elif command == COMMAND_MUTE:
            await self._execute_mute(data, params)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
def _verify_pin_challenge(data, state, challenge):
    """Verify a pin challenge."""
    # Entities not configured for 2FA pass through unchallenged.
    if not data.config.should_2fa(state):
        return
    if not data.config.secure_devices_pin:
        raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up")
    # No challenge payload yet: ask Google to prompt for the pin.
    if not challenge:
        raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)
    if challenge.get("pin") != data.config.secure_devices_pin:
        raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, state, challenge):
    """Verify an ack challenge."""
    # Entities not configured for 2FA pass through unchallenged.
    if not data.config.should_2fa(state):
        return
    acknowledged = bool(challenge) and challenge.get("ack")
    if not acknowledged:
        raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
# Google transport-control command -> media_player SUPPORT_* feature bit
# that a player must advertise for the command to be offered/executed.
MEDIA_COMMAND_SUPPORT_MAPPING = {
    COMMAND_MEDIA_NEXT: media_player.SUPPORT_NEXT_TRACK,
    COMMAND_MEDIA_PAUSE: media_player.SUPPORT_PAUSE,
    COMMAND_MEDIA_PREVIOUS: media_player.SUPPORT_PREVIOUS_TRACK,
    COMMAND_MEDIA_RESUME: media_player.SUPPORT_PLAY,
    COMMAND_MEDIA_SEEK_RELATIVE: media_player.SUPPORT_SEEK,
    COMMAND_MEDIA_SEEK_TO_POSITION: media_player.SUPPORT_SEEK,
    COMMAND_MEDIA_SHUFFLE: media_player.SUPPORT_SHUFFLE_SET,
    COMMAND_MEDIA_STOP: media_player.SUPPORT_STOP,
}
# Google transport-control command -> value listed under
# transportControlSupportedCommands during a sync request.
MEDIA_COMMAND_ATTRIBUTES = {
    COMMAND_MEDIA_NEXT: "NEXT",
    COMMAND_MEDIA_PAUSE: "PAUSE",
    COMMAND_MEDIA_PREVIOUS: "PREVIOUS",
    COMMAND_MEDIA_RESUME: "RESUME",
    COMMAND_MEDIA_SEEK_RELATIVE: "SEEK_RELATIVE",
    COMMAND_MEDIA_SEEK_TO_POSITION: "SEEK_TO_POSITION",
    COMMAND_MEDIA_SHUFFLE: "SHUFFLE",
    COMMAND_MEDIA_STOP: "STOP",
}
@register_trait
class TransportControlTrait(_Trait):
    """Trait to control media playback.
    https://developers.google.com/actions/smarthome/traits/transportcontrol
    """
    name = TRAIT_TRANSPORT_CONTROL
    commands = [
        COMMAND_MEDIA_NEXT,
        COMMAND_MEDIA_PAUSE,
        COMMAND_MEDIA_PREVIOUS,
        COMMAND_MEDIA_RESUME,
        COMMAND_MEDIA_SEEK_RELATIVE,
        COMMAND_MEDIA_SEEK_TO_POSITION,
        COMMAND_MEDIA_SHUFFLE,
        COMMAND_MEDIA_STOP,
    ]
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        # Supported if at least one mapped media command is available.
        if domain == media_player.DOMAIN:
            for feature in MEDIA_COMMAND_SUPPORT_MAPPING.values():
                if features & feature:
                    return True
        return False
    def sync_attributes(self):
        """Return the transport commands supported by this entity."""
        response = {}
        if self.state.domain == media_player.DOMAIN:
            features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            support = []
            for command, feature in MEDIA_COMMAND_SUPPORT_MAPPING.items():
                if features & feature:
                    support.append(MEDIA_COMMAND_ATTRIBUTES[command])
            response["transportControlSupportedCommands"] = support
        return response
    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        return {}
    async def execute(self, command, data, params, challenge):
        """Execute a media command.

        Translates each Google command into the corresponding
        media_player service; seek commands also compute the target
        position, clamped to [0, media duration].
        """
        service_attrs = {ATTR_ENTITY_ID: self.state.entity_id}
        if command == COMMAND_MEDIA_SEEK_RELATIVE:
            service = media_player.SERVICE_MEDIA_SEEK
            rel_position = params["relativePositionMs"] / 1000
            seconds_since = 0  # Default to 0 seconds
            if self.state.state == STATE_PLAYING:
                # While playing, extrapolate the current position from
                # the last reported position plus elapsed wall-clock time.
                now = dt.utcnow()
                upd_at = self.state.attributes.get(
                    media_player.ATTR_MEDIA_POSITION_UPDATED_AT, now
                )
                seconds_since = (now - upd_at).total_seconds()
            position = self.state.attributes.get(media_player.ATTR_MEDIA_POSITION, 0)
            max_position = self.state.attributes.get(
                media_player.ATTR_MEDIA_DURATION, 0
            )
            service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min(
                max(position + seconds_since + rel_position, 0), max_position
            )
        elif command == COMMAND_MEDIA_SEEK_TO_POSITION:
            service = media_player.SERVICE_MEDIA_SEEK
            max_position = self.state.attributes.get(
                media_player.ATTR_MEDIA_DURATION, 0
            )
            service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min(
                max(params["absPositionMs"] / 1000, 0), max_position
            )
        elif command == COMMAND_MEDIA_NEXT:
            service = media_player.SERVICE_MEDIA_NEXT_TRACK
        elif command == COMMAND_MEDIA_PAUSE:
            service = media_player.SERVICE_MEDIA_PAUSE
        elif command == COMMAND_MEDIA_PREVIOUS:
            service = media_player.SERVICE_MEDIA_PREVIOUS_TRACK
        elif command == COMMAND_MEDIA_RESUME:
            service = media_player.SERVICE_MEDIA_PLAY
        elif command == COMMAND_MEDIA_SHUFFLE:
            service = media_player.SERVICE_SHUFFLE_SET
            # Google Assistant only supports enabling shuffle
            service_attrs[media_player.ATTR_MEDIA_SHUFFLE] = True
        elif command == COMMAND_MEDIA_STOP:
            service = media_player.SERVICE_MEDIA_STOP
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
        await self.hass.services.async_call(
            media_player.DOMAIN,
            service,
            service_attrs,
            blocking=True,
            context=data.context,
        )
@register_trait
class MediaStateTrait(_Trait):
    """Trait to get media playback state.
    https://developers.google.com/actions/smarthome/traits/mediastate
    """
    name = TRAIT_MEDIA_STATE
    commands = []
    # HA media_player state -> Google activityState value.
    activity_lookup = {
        STATE_OFF: "INACTIVE",
        STATE_IDLE: "STANDBY",
        STATE_PLAYING: "ACTIVE",
        STATE_ON: "STANDBY",
        STATE_PAUSED: "STANDBY",
        STATE_STANDBY: "STANDBY",
        STATE_UNAVAILABLE: "INACTIVE",
        STATE_UNKNOWN: "INACTIVE",
    }
    # HA media_player state -> Google playbackState value.
    playback_lookup = {
        STATE_OFF: "STOPPED",
        STATE_IDLE: "STOPPED",
        STATE_PLAYING: "PLAYING",
        STATE_ON: "STOPPED",
        STATE_PAUSED: "PAUSED",
        STATE_STANDBY: "STOPPED",
        STATE_UNAVAILABLE: "STOPPED",
        STATE_UNKNOWN: "STOPPED",
    }
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == media_player.DOMAIN
    def sync_attributes(self):
        """Return attributes for a sync request."""
        return {"supportActivityState": True, "supportPlaybackState": True}
    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        current = self.state.state
        # Unmapped states fall back to the most conservative values.
        return {
            "activityState": self.activity_lookup.get(current, "INACTIVE"),
            "playbackState": self.playback_lookup.get(current, "STOPPED"),
        }
| 34.640176 | 99 | 0.599688 |
7bc5da4a0e93f0b653be287151df8f075f11bffe | 6,270 | py | Python | sdk/python/pulumi_azure_native/delegatednetwork/v20200808preview/get_delegated_subnet_service_details.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/delegatednetwork/v20200808preview/get_delegated_subnet_service_details.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/delegatednetwork/v20200808preview/get_delegated_subnet_service_details.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDelegatedSubnetServiceDetailsResult',
'AwaitableGetDelegatedSubnetServiceDetailsResult',
'get_delegated_subnet_service_details',
]
@pulumi.output_type
class GetDelegatedSubnetServiceDetailsResult:
    """
    Represents an instance of a orchestrator.
    """
    # NOTE: this class is auto-generated by the Pulumi SDK generator (see file
    # header) — hand edits will be lost on regeneration. @pulumi.output_type
    # wires the @pulumi.getter properties below to the values stored with
    # pulumi.set() in __init__.
    def __init__(__self__, controller_details=None, id=None, location=None, name=None, provisioning_state=None, resource_guid=None, subnet_details=None, tags=None, type=None):
        # Defensive shape checks: each argument is validated then stored via
        # pulumi.set so the matching @pulumi.getter property can retrieve it.
        if controller_details and not isinstance(controller_details, dict):
            raise TypeError("Expected argument 'controller_details' to be a dict")
        pulumi.set(__self__, "controller_details", controller_details)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if resource_guid and not isinstance(resource_guid, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", resource_guid)
        if subnet_details and not isinstance(subnet_details, dict):
            raise TypeError("Expected argument 'subnet_details' to be a dict")
        pulumi.set(__self__, "subnet_details", subnet_details)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="controllerDetails")
    def controller_details(self) -> Optional['outputs.ControllerDetailsResponse']:
        """
        Properties of the controller.
        """
        return pulumi.get(self, "controller_details")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        An identifier that represents the resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Location of the resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The current state of dnc delegated subnet resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        Resource guid.
        """
        return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter(name="subnetDetails")
    def subnet_details(self) -> Optional['outputs.SubnetDetailsResponse']:
        """
        subnet details
        """
        return pulumi.get(self, "subnet_details")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetDelegatedSubnetServiceDetailsResult(GetDelegatedSubnetServiceDetailsResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator function so
        # the result can be awaited; it returns the already-computed value
        # immediately without ever suspending.
        if False:
            yield self
        return GetDelegatedSubnetServiceDetailsResult(
            controller_details=self.controller_details,
            id=self.id,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            resource_guid=self.resource_guid,
            subnet_details=self.subnet_details,
            tags=self.tags,
            type=self.type)
def get_delegated_subnet_service_details(resource_group_name: Optional[str] = None,
                                         resource_name: Optional[str] = None,
                                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDelegatedSubnetServiceDetailsResult:
    """
    Represents an instance of a orchestrator.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the resource. It must be a minimum of 3 characters, and a maximum of 63.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    # Default invoke options and pin the SDK version used for the call.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the Azure Native provider; `typ` tells the
    # runtime how to deserialize the result.
    __ret__ = pulumi.runtime.invoke('azure-native:delegatednetwork/v20200808preview:getDelegatedSubnetServiceDetails', __args__, opts=opts, typ=GetDelegatedSubnetServiceDetailsResult).value
    return AwaitableGetDelegatedSubnetServiceDetailsResult(
        controller_details=__ret__.controller_details,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        resource_guid=__ret__.resource_guid,
        subnet_details=__ret__.subnet_details,
        tags=__ret__.tags,
        type=__ret__.type)
| 36.666667 | 189 | 0.657097 |
9972ab8c83a77737907ad98ff086fcd847fec56c | 14,809 | py | Python | experiment/generate_tests.py | haoshuwei/test-infra | 4696b77f4ee7cfffe8e86a8b8e84c797d6846bfd | [
"Apache-2.0"
] | null | null | null | experiment/generate_tests.py | haoshuwei/test-infra | 4696b77f4ee7cfffe8e86a8b8e84c797d6846bfd | [
"Apache-2.0"
] | null | null | null | experiment/generate_tests.py | haoshuwei/test-infra | 4696b77f4ee7cfffe8e86a8b8e84c797d6846bfd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create e2e test definitions.
Usage example:
In $GOPATH/src/k8s.io/test-infra,
$ bazel run //experiment:generate_tests -- \
--yaml-config-path=experiment/test_config.yaml \
"""
import argparse
import hashlib
import os
import ruamel.yaml as yaml
# TODO(yguo0905): Generate Prow and testgrid configurations.
PROW_CONFIG_TEMPLATE = """
tags:
- generated # AUTO-GENERATED by experiment/generate_tests.py - DO NOT EDIT!
interval:
cron:
labels:
preset-service-account: "true"
preset-k8s-ssh: "true"
name:
spec:
containers:
- args:
env:
image: gcr.io/k8s-testimages/kubekins-e2e:v20191024-4a89182-master
"""
E2E_TESTGRID_CONFIG_TEMPLATE = """
name:
gcs_prefix:
column_header:
- configuration_value: node_os_image
- configuration_value: master_os_image
- configuration_value: Commit
- configuration_value: infra-commit
"""
GCS_LOG_PREFIX = "kubernetes-jenkins/logs/"
COMMENT = 'AUTO-GENERATED by experiment/generate_tests.py - DO NOT EDIT.'
def get_sha1_hash(data):
    """Return the hex SHA1 digest of *data* (a text string)."""
    return hashlib.sha1(data.encode('utf-8')).hexdigest()
def substitute(job_name, lines):
    """Expand the ``${job_name_hash}`` placeholder in every line.

    The placeholder becomes the first 10 hex chars of SHA1(job_name).
    """
    token = get_sha1_hash(job_name)[:10]
    expanded = []
    for line in lines:
        expanded.append(line.replace('${job_name_hash}', token))
    return expanded
def get_args(job_name, field):
    """Return the substituted 'args' list of *field*, or [] when field is falsy."""
    if field:
        return substitute(job_name, field.get('args', []))
    return []
def write_prow_configs_file(output_file, job_defs):
    """Serialize the generated Prow job definitions to *output_file* as YAML."""
    with open(output_file, 'w') as out:
        yaml.dump(job_defs, out, Dumper=yaml.RoundTripDumper, width=float("inf"))
def write_testgrid_config_file(output_file, testgrid_config):
    """Write TestGrid test-group config, prefixed with the generated-file banner."""
    with open(output_file, 'w') as out:
        out.write('# %s\n\n' % COMMENT)
        yaml.dump(testgrid_config, out, Dumper=yaml.RoundTripDumper, width=float("inf"))
def apply_job_overrides(envs_or_args, job_envs_or_args):
    '''Apply job-level env/arg overrides to *envs_or_args* in place.

    For each override, the first existing entry with the same name — either
    "name=value" or a bare "name" — is removed, then the override is appended.
    '''
    for override in job_envs_or_args:
        name = override.split('=', 1)[0]
        for existing in envs_or_args:
            stripped = existing.strip()
            if stripped == name or stripped.startswith(name + '='):
                envs_or_args.remove(existing)
                break
        envs_or_args.append(override)
class E2ENodeTest(object):
    """Builds the job definition and Prow config for a node e2e test job.

    Job names have 6 dash-separated fields:
    <org>-<repo>-e2enode-<image>-k8s<version>-<suite>.
    """
    def __init__(self, job_name, job, config):
        self.job_name = job_name
        self.job = job
        self.common = config['nodeCommon']
        self.images = config['nodeImages']
        self.k8s_versions = config['nodeK8sVersions']
        self.test_suites = config['nodeTestSuites']
    def __get_job_def(self, args):
        """Returns the job definition from the given args."""
        return {
            'scenario': 'kubernetes_e2e',
            'args': args,
            'sigOwners': self.job.get('sigOwners') or ['UNNOWN'],
            # Indicates that this job definition is auto-generated.
            'tags': ['generated'],
            '_comment': COMMENT,
        }
    def __get_prow_config(self, test_suite, k8s_version):
        """Returns the Prow config for the job from the given fields."""
        prow_config = yaml.round_trip_load(PROW_CONFIG_TEMPLATE)
        prow_config['name'] = self.job_name
        # The template carries both 'interval' and 'cron' placeholder keys;
        # keep exactly the one this job defines and drop the other.
        if 'interval' in self.job:
            del prow_config['cron']
            prow_config['interval'] = self.job['interval']
        elif 'cron' in self.job:
            # BUGFIX: this branch previously deleted (and re-set) 'cron',
            # leaving a stray empty 'interval' key in the emitted config.
            # Mirror E2ETest.__get_prow_config and delete 'interval' instead.
            del prow_config['interval']
            prow_config['cron'] = self.job['cron']
        else:
            raise Exception("no interval or cron definition found")
        # Assumes that the value in --timeout is of minutes.
        timeout = int(next(
            x[10:-1] for x in test_suite['args'] if (
                x.startswith('--timeout='))))
        container = prow_config['spec']['containers'][0]
        if not container['args']:
            container['args'] = []
        if not container['env']:
            container['env'] = []
        # Prow timeout = job timeout + 20min
        container['args'].append('--timeout=%d' % (timeout + 20))
        container['args'].extend(k8s_version.get('args', []))
        container['args'].append('--root=/go/src')
        container['env'].extend([{'name':'GOPATH', 'value': '/go'}])
        # Specify the appropriate kubekins-e2e image. This allows us to use a
        # specific image (containing a particular Go version) to build and
        # trigger the node e2e test to avoid issues like
        # https://github.com/kubernetes/kubernetes/issues/43534.
        if k8s_version.get('prowImage', None):
            container['image'] = k8s_version['prowImage']
        return prow_config
    def generate(self):
        '''Returns (job_config, prow_config, None) for this node e2e test.'''
        fields = self.job_name.split('-')
        if len(fields) != 6:
            raise ValueError('Expected 6 fields in job name', self.job_name)
        image = self.images[fields[3]]
        k8s_version = self.k8s_versions[fields[4][3:]]
        test_suite = self.test_suites[fields[5]]
        # envs are disallowed in node e2e tests.
        if 'envs' in self.common or 'envs' in image or 'envs' in test_suite:
            raise ValueError(
                'envs are disallowed in node e2e test', self.job_name)
        # Generates args.
        args = []
        args.extend(get_args(self.job_name, self.common))
        args.extend(get_args(self.job_name, image))
        args.extend(get_args(self.job_name, test_suite))
        # Generates job config.
        job_config = self.__get_job_def(args)
        # Generates prow config.
        prow_config = self.__get_prow_config(test_suite, k8s_version)
        # Merge every --node-args=... occurrence into a single flag whose
        # value is the space-joined list of all collected node args.
        node_args = []
        job_args = []
        for arg in job_config['args']:
            if '--node-args=' in arg:
                node_args.append(arg.split('=', 1)[1])
            else:
                job_args.append(arg)
        if node_args:
            flag = '--node-args='
            for node_arg in node_args:
                flag += '%s ' % node_arg
            job_args.append(flag.strip())
        job_config['args'] = job_args
        # Annotate testgrid placement when the image declares a prefix.
        if image.get('testgrid_prefix') is not None:
            dashboard = '%s-%s-%s' % (image['testgrid_prefix'], fields[3],
                                      fields[4])
            annotations = prow_config.setdefault('annotations', {})
            annotations['testgrid-dashboards'] = dashboard
            tab_name = '%s-%s-%s' % (fields[3], fields[4], fields[5])
            annotations['testgrid-tab-name'] = tab_name
        return job_config, prow_config, None
class E2ETest(object):
    """Builds the job, Prow, and TestGrid configs for a cluster e2e test job.

    Job names have 7 dash-separated fields:
    <org>-<repo>-e2e-<cloud>-<image>-k8s<version>-<suite>.
    """
    def __init__(self, output_dir, job_name, job, config):
        # BUGFIX: a stray trailing comma previously made env_filename a
        # 1-tuple instead of the intended path string.
        self.env_filename = os.path.join(output_dir, '%s.env' % job_name)
        self.job_name = job_name
        self.job = job
        self.common = config['common']
        self.cloud_providers = config['cloudProviders']
        self.images = config['images']
        self.k8s_versions = config['k8sVersions']
        self.test_suites = config['testSuites']
    def __get_job_def(self, args):
        """Returns the job definition from the given args."""
        return {
            'scenario': 'kubernetes_e2e',
            'args': args,
            'sigOwners': self.job.get('sigOwners') or ['UNNOWN'],
            # Indicates that this job definition is auto-generated.
            'tags': ['generated'],
            '_comment': COMMENT,
        }
    def __get_prow_config(self, test_suite):
        """Returns the Prow config for the e2e job from the given fields."""
        prow_config = yaml.round_trip_load(PROW_CONFIG_TEMPLATE)
        prow_config['name'] = self.job_name
        # Keep exactly one scheduling key (interval or cron) from the template.
        if 'interval' in self.job:
            del prow_config['cron']
            prow_config['interval'] = self.job['interval']
        elif 'cron' in self.job:
            del prow_config['interval']
            prow_config['cron'] = self.job['cron']
        else:
            raise Exception("no interval or cron definition found")
        # Assumes that the value in --timeout is of minutes.
        timeout = int(next(
            x[10:-1] for x in test_suite['args'] if (
                x.startswith('--timeout='))))
        container = prow_config['spec']['containers'][0]
        if not container['args']:
            container['args'] = []
        container['args'].append('--bare')
        # Prow timeout = job timeout + 20min
        container['args'].append('--timeout=%d' % (timeout + 20))
        return prow_config
    def __get_testgrid_config(self):
        """Returns the TestGrid test-group config pointing at this job's GCS logs."""
        tg_config = yaml.round_trip_load(E2E_TESTGRID_CONFIG_TEMPLATE)
        tg_config['name'] = self.job_name
        tg_config['gcs_prefix'] = GCS_LOG_PREFIX + self.job_name
        return tg_config
    def initialize_dashboards_with_release_blocking_info(self, version):
        """Pick the sig-release dashboard matching the job's blocking status."""
        dashboards = []
        if self.job.get('releaseBlocking'):
            dashboards.append('sig-release-%s-blocking' % version)
        elif self.job.get('releaseInforming'):
            dashboards.append('sig-release-%s-informing' % version)
        else:
            dashboards.append('sig-release-%s-all' % version)
        return dashboards
    def generate(self):
        '''Returns (job_config, prow_config, tg_config) for this test.'''
        fields = self.job_name.split('-')
        if len(fields) != 7:
            raise ValueError('Expected 7 fields in job name', self.job_name)
        cloud_provider = self.cloud_providers[fields[3]]
        image = self.images[fields[4]]
        k8s_version = self.k8s_versions[fields[5][3:]]
        test_suite = self.test_suites[fields[6]]
        # Generates args.
        args = []
        args.extend(get_args(self.job_name, self.common))
        args.extend(get_args(self.job_name, cloud_provider))
        args.extend(get_args(self.job_name, image))
        args.extend(get_args(self.job_name, k8s_version))
        args.extend(get_args(self.job_name, test_suite))
        # Generates job config.
        job_config = self.__get_job_def(args)
        # Generates Prow config.
        prow_config = self.__get_prow_config(test_suite)
        tg_config = self.__get_testgrid_config()
        annotations = prow_config.setdefault('annotations', {})
        tab_name = '%s-%s-%s-%s' % (fields[3], fields[4], fields[5], fields[6])
        annotations['testgrid-tab-name'] = tab_name
        dashboards = self.initialize_dashboards_with_release_blocking_info(k8s_version['version'])
        if image.get('testgrid_prefix') is not None:
            dashboard = '%s-%s-%s' % (image['testgrid_prefix'], fields[4],
                                      fields[5])
            dashboards.append(dashboard)
        annotations['testgrid-dashboards'] = ', '.join(dashboards)
        if 'testgridNumFailuresToAlert' in self.job:
            annotations['testgrid-num-failures-to-alert'] = ('%s' %
                                                             self.job['testgridNumFailuresToAlert'])
        return job_config, prow_config, tg_config
def for_each_job(output_dir, job_name, job, yaml_config):
    """Build the Prow and TestGrid configs for one job entry."""
    fields = job_name.split('-')
    if len(fields) < 3:
        raise ValueError('Expected at least 3 fields in job name', job_name)
    job_type = fields[2]
    # Pick the generator matching the job type encoded in the name.
    if job_type == 'e2e':
        generator = E2ETest(output_dir, job_name, job, yaml_config)
    elif job_type == 'e2enode':
        generator = E2ENodeTest(job_name, job, yaml_config)
    else:
        raise ValueError('Unexpected job type ', job_type)
    job_config, prow_config, testgrid_config = generator.generate()
    # Job-level overrides win over the generated defaults.
    apply_job_overrides(job_config['args'], get_args(job_name, job))
    # Fold the job definition into the Prow container command line.
    container_args = prow_config['spec']['containers'][0]['args']
    container_args.append('--scenario=' + job_config['scenario'])
    container_args.append('--')
    container_args.extend(job_config['args'])
    return prow_config, testgrid_config
def main(yaml_config_path, output_dir, testgrid_output_path):
    """Creates test job definitions.
    Converts the test configurations in yaml_config_path to the job definitions
    in output_dir/generated.yaml and the TestGrid groups in
    testgrid_output_path.
    """
    # TODO(yguo0905): Validate the configurations from yaml_config_path.
    with open(yaml_config_path) as fp:
        yaml_config = yaml.safe_load(fp)
    output_config = {'periodics': []}
    testgrid_config = {'test_groups': []}
    # Iterate the (name, job) pairs directly — previously the value was
    # discarded and re-looked-up via yaml_config['jobs'][job_name].
    for job_name, job in yaml_config['jobs'].items():
        prow, testgrid = for_each_job(output_dir, job_name, job, yaml_config)
        output_config['periodics'].append(prow)
        if testgrid is not None:
            testgrid_config['test_groups'].append(testgrid)
    # Write the job definitions to --output-dir/generated.yaml
    write_prow_configs_file(output_dir + 'generated.yaml', output_config)
    write_testgrid_config_file(testgrid_output_path, testgrid_config)
if __name__ == '__main__':
    # CLI entry point: see the module docstring for an invocation example.
    PARSER = argparse.ArgumentParser(
        description='Create test definitions from the given yaml config')
    PARSER.add_argument('--yaml-config-path', help='Path to config.yaml')
    PARSER.add_argument(
        '--output-dir',
        help='Prowjob config output dir',
        default='config/jobs/kubernetes/generated/')
    PARSER.add_argument(
        '--testgrid-output-path',
        help='Path to testgrid output file',
        default='config/testgrids/generated-test-config.yaml')
    ARGS = PARSER.parse_args()
    main(
        ARGS.yaml_config_path,
        ARGS.output_dir,
        ARGS.testgrid_output_path)
| 37.586294 | 100 | 0.627254 |
e736ac3d849d12cc977b4baa2e2ed634ae8e2498 | 1,038 | py | Python | examples/script_archive/14b_tuun_minimize.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 33 | 2020-08-30T16:22:35.000Z | 2022-02-26T13:48:32.000Z | examples/script_archive/14b_tuun_minimize.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 2 | 2021-01-18T19:46:43.000Z | 2021-03-24T09:59:14.000Z | examples/script_archive/14b_tuun_minimize.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 2 | 2020-08-25T17:02:15.000Z | 2021-04-21T16:40:44.000Z | from tuun.main import Tuun
# Example: minimize a quartic objective over a mixed real/categorical space.
config = {
    'seed': 12,
    'acqfunction_config': {'name': 'default', 'acq_str': 'ucb'},
}
tu = Tuun(config)

# Search space: two real dimensions and one categorical list dimension.
search_space_list = [('real', [-10, 10]), ('real', [-4, 4]), ('list', ['a', 'b', 'c', 'd', 'e'])]
tu.set_config_from_list(search_space_list)


def f_s(x):
    """Scalar quartic test objective."""
    return x ** 4 - x ** 2 + 0.1 * x


def f(x_list):
    """Objective over one sample: sum of f_s on the two real dimensions."""
    return f_s(x_list[0]) + f_s(x_list[1])


result = tu.minimize_function(f, 50)
| 39.923077 | 98 | 0.512524 |
8c6523859a99b21393aade5902cdf696fedfdf87 | 4,049 | py | Python | lnbits/extensions/usermanager/views_api.py | blackcoffeexbt/lnbits-legend | a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64 | [
"MIT"
] | null | null | null | lnbits/extensions/usermanager/views_api.py | blackcoffeexbt/lnbits-legend | a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64 | [
"MIT"
] | 2 | 2022-03-22T06:27:41.000Z | 2022-03-23T02:05:30.000Z | lnbits/extensions/usermanager/views_api.py | blackcoffeexbt/lnbits-legend | a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64 | [
"MIT"
] | null | null | null | from http import HTTPStatus
from fastapi import Query
from fastapi.params import Depends
from starlette.exceptions import HTTPException
from lnbits.core import update_user_extension
from lnbits.core.crud import get_user
from lnbits.decorators import WalletTypeInfo, get_key_type
from . import usermanager_ext
from .crud import (
create_usermanager_user,
create_usermanager_wallet,
delete_usermanager_user,
delete_usermanager_wallet,
get_usermanager_user,
get_usermanager_users,
get_usermanager_users_wallets,
get_usermanager_wallet,
get_usermanager_wallet_transactions,
get_usermanager_wallets,
)
from .models import CreateUserData, CreateUserWallet
# Users
@usermanager_ext.get("/api/v1/users", status_code=HTTPStatus.OK)
async def api_usermanager_users(wallet: WalletTypeInfo = Depends(get_key_type)):
user_id = wallet.wallet.user
return [user.dict() for user in await get_usermanager_users(user_id)]
@usermanager_ext.get("/api/v1/users/{user_id}", status_code=HTTPStatus.OK)
async def api_usermanager_user(user_id, wallet: WalletTypeInfo = Depends(get_key_type)):
user = await get_usermanager_user(user_id)
return user.dict()
@usermanager_ext.post("/api/v1/users", status_code=HTTPStatus.CREATED)
async def api_usermanager_users_create(
data: CreateUserData, wallet: WalletTypeInfo = Depends(get_key_type)
):
user = await create_usermanager_user(data)
full = user.dict()
full["wallets"] = [
wallet.dict() for wallet in await get_usermanager_users_wallets(user.id)
]
return full
@usermanager_ext.delete("/api/v1/users/{user_id}")
async def api_usermanager_users_delete(
user_id, wallet: WalletTypeInfo = Depends(get_key_type)
):
user = await get_usermanager_user(user_id)
if not user:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="User does not exist."
)
await delete_usermanager_user(user_id)
raise HTTPException(status_code=HTTPStatus.NO_CONTENT)
# Activate Extension
@usermanager_ext.post("/api/v1/extensions")
async def api_usermanager_activate_extension(
extension: str = Query(...), userid: str = Query(...), active: bool = Query(...)
):
user = await get_user(userid)
if not user:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="User does not exist."
)
update_user_extension(user_id=userid, extension=extension, active=active)
return {"extension": "updated"}
# Wallets
@usermanager_ext.post("/api/v1/wallets")
async def api_usermanager_wallets_create(
data: CreateUserWallet, wallet: WalletTypeInfo = Depends(get_key_type)
):
user = await create_usermanager_wallet(
user_id=data.user_id, wallet_name=data.wallet_name, admin_id=data.admin_id
)
return user.dict()
@usermanager_ext.get("/api/v1/wallets")
async def api_usermanager_wallets(wallet: WalletTypeInfo = Depends(get_key_type)):
admin_id = wallet.wallet.user
return [wallet.dict() for wallet in await get_usermanager_wallets(admin_id)]
@usermanager_ext.get("/api/v1/transactions/{wallet_id}")
async def api_usermanager_wallet_transactions(
wallet_id, wallet: WalletTypeInfo = Depends(get_key_type)
):
return await get_usermanager_wallet_transactions(wallet_id)
@usermanager_ext.get("/api/v1/wallets/{user_id}")
async def api_usermanager_users_wallets(
user_id, wallet: WalletTypeInfo = Depends(get_key_type)
):
return [
s_wallet.dict() for s_wallet in await get_usermanager_users_wallets(user_id)
]
@usermanager_ext.delete("/api/v1/wallets/{wallet_id}")
async def api_usermanager_wallets_delete(
wallet_id, wallet: WalletTypeInfo = Depends(get_key_type)
):
get_wallet = await get_usermanager_wallet(wallet_id)
if not get_wallet:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="Wallet does not exist."
)
await delete_usermanager_wallet(wallet_id, get_wallet.user)
raise HTTPException(status_code=HTTPStatus.NO_CONTENT)
| 31.632813 | 88 | 0.75673 |
bf7204fc9f7517adcaffe75aa060caf74e0544ab | 2,067 | py | Python | cbagent/collectors/libstats/pool.py | jimwwalker/perfrunner | fda19ce9d860f8548b3feeb761b9a70598500cce | [
"Apache-2.0"
] | null | null | null | cbagent/collectors/libstats/pool.py | jimwwalker/perfrunner | fda19ce9d860f8548b3feeb761b9a70598500cce | [
"Apache-2.0"
] | 4 | 2021-04-20T17:14:17.000Z | 2022-02-11T03:42:49.000Z | cbagent/collectors/libstats/pool.py | jimwwalker/perfrunner | fda19ce9d860f8548b3feeb761b9a70598500cce | [
"Apache-2.0"
] | 1 | 2018-06-25T18:57:10.000Z | 2018-06-25T18:57:10.000Z | from queue import Empty, Queue
from threading import Lock
from time import time
from couchbase.bucket import Bucket
class ClientUnavailableError(Exception):
    """Raised when the pool is at max_clients and no client can be created."""
    pass
class BucketWrapper(Bucket):
    # A couchbase Bucket that tracks how often and for how long it is used.
    def __init__(self, **kwargs):
        # NOTE(review): builds "couchbase://host:port/bucket"; port defaults
        # to 8091 here — confirm that matches the connection-string scheme
        # expected by the installed couchbase SDK version.
        connection_string = 'couchbase://{}:{}/{}'.format(
            kwargs['host'], kwargs.get('port', 8091), kwargs['bucket'])
        super().__init__(connection_string,
                         password=kwargs['password'], quiet=kwargs['quiet'])
        self.use_count = 0  # number of completed checkouts
        self.use_time = 0  # cumulative seconds spent checked out
        self.last_use_time = 0  # start timestamp of the current checkout
    def start_using(self):
        # Mark the beginning of a checkout.
        self.last_use_time = time()
    def stop_using(self):
        # Accumulate the duration of the checkout that just ended.
        self.use_time += time() - self.last_use_time
        self.use_count += 1
class Pool:
    """A bounded pool of BucketWrapper clients.

    Clients are handed out with get_client() and must be given back with
    release_client(). Up to *max_clients* clients are created on demand;
    beyond that, get_client() waits for a client to be released.
    """
    def __init__(self, initial=10, max_clients=20, **connargs):
        self._q = Queue()
        self._l = []  # every client ever created (bookkeeping)
        self._connargs = connargs
        self._cur_clients = 0
        self._max_clients = max_clients
        self._lock = Lock()
        for x in range(initial):
            self._q.put(self._make_client())
            self._cur_clients += 1
    def _make_client(self):
        """Create a new client and remember it in self._l."""
        bucket = BucketWrapper(**self._connargs)
        self._l.append(bucket)
        return bucket
    def get_client(self, initial_timeout=0.05, next_timeout=200):
        """Check a client out of the pool.

        Raises ClientUnavailableError when the pool is exhausted and no
        client is released within *next_timeout* seconds.
        """
        try:
            cb = self._q.get(True, initial_timeout)
            # BUGFIX: start the usage timer on pooled clients too; previously
            # only freshly created clients called start_using(), so
            # release_client()'s stop_using() recorded garbage durations
            # computed from a stale last_use_time.
            cb.start_using()
            return cb
        except Empty:
            try:
                self._lock.acquire()
                if self._cur_clients == self._max_clients:
                    raise ClientUnavailableError("Too many clients in use")
                cb = self._make_client()
                self._cur_clients += 1
                cb.start_using()
                return cb
            except ClientUnavailableError as ex:
                # Pool is full: wait for somebody to release a client.
                # NOTE: the lock is held while waiting, which serializes other
                # get_client() callers; release_client() does not need the
                # lock, so clients can still be returned meanwhile.
                try:
                    cb = self._q.get(True, next_timeout)
                    cb.start_using()
                    return cb
                except Empty:
                    raise ex
            finally:
                self._lock.release()
    def release_client(self, cb):
        """Return a client to the pool, closing out its usage accounting."""
        cb.stop_using()
        self._q.put(cb, True)
| 27.56 | 76 | 0.565554 |
4ef45e3d7f81a3a42bb6b056c3d007c2f127501e | 1,357 | py | Python | location_analyzer.py | turbotardigrade/fly | b2f2675c9b5f921aecbc0ee1c5a78b4435bc642c | [
"MIT"
] | null | null | null | location_analyzer.py | turbotardigrade/fly | b2f2675c9b5f921aecbc0ee1c5a78b4435bc642c | [
"MIT"
] | null | null | null | location_analyzer.py | turbotardigrade/fly | b2f2675c9b5f921aecbc0ee1c5a78b4435bc642c | [
"MIT"
] | null | null | null | import json
import numpy as np
from sklearn import cluster
def analyze(raw_data):
    """Cluster a Google-Takeout location history into hotspots.

    raw_data: JSON string with a top-level "locations" list whose entries
    carry 'latitudeE7'/'longitudeE7' (degrees * 1e7).
    Returns (result, data_size): result is a list of
    {"position": {"lat", "lng"}, "label": percent-of-points}, sorted by
    label descending; data_size is the total number of input locations.
    """
    data = json.loads(raw_data)['locations']
    data_size = len(data)
    print("# of locations %d" % data_size)
    # Downsample by taking every 50th point to keep clustering tractable.
    # (Replaces the py2-only xrange/integer-division preallocate-then-filter
    # loop with an equivalent slice; also works on Python 3.)
    reduce_by_factor = 50
    locations = [
        [int(loc['latitudeE7']), int(loc['longitudeE7'])]
        for loc in data[::reduce_by_factor]
    ]
    # Use K-Mean clustering to get k hotspots.
    k = 20
    kmeans = cluster.KMeans(n_clusters=k, n_jobs=2)
    # E7 coordinates (<= ~1.8e9 range, |value| <= 1.8e9/2) fit in int32.
    samples = np.array(locations, np.int32)
    kmeans.fit(samples)
    points = []
    for centroid in kmeans.cluster_centers_:
        points.append({
            "lat": centroid[0] * 1e-7,
            "lng": centroid[1] * 1e-7
        })
    # Percentage of sampled points assigned to each cluster.
    labels = []
    for count in np.bincount(kmeans.labels_):
        labels.append(float(count) / len(samples) * 100)
    result = []
    for l, p in zip(labels, points):
        result.append({"position": p, "label": round(l, 2)})
    result.sort(key=lambda x: x["label"], reverse=True)
    # print results
    for r in result:
        # BUGFIX: the format arguments were swapped (label printed under
        # "position" and vice versa).
        print("position: {0}, label: {1}".format(r["position"], r["label"]))
    return result, data_size
| 27.14 | 76 | 0.60059 |
40b7324ce7831c3981a74d978e20f6a008a7c2c3 | 4,685 | py | Python | ProtocolDetector/Engine.py | csandoval/ProtocolDetector | e71960cdba2adbe8e385c033dff1bbcdd6f998e1 | [
"MIT"
] | 9 | 2017-09-10T12:35:12.000Z | 2020-04-20T23:04:29.000Z | ProtocolDetector/Engine.py | csandoval/ProtocolDetector | e71960cdba2adbe8e385c033dff1bbcdd6f998e1 | [
"MIT"
] | null | null | null | ProtocolDetector/Engine.py | csandoval/ProtocolDetector | e71960cdba2adbe8e385c033dff1bbcdd6f998e1 | [
"MIT"
] | 7 | 2017-09-09T16:51:39.000Z | 2020-07-27T16:32:42.000Z | #!/usr/bin/env python
#
#=============================================================================
#
# File Name : Engine.py
# Author : Jose Ramon Palanco <jose.palanco@dinoflux.com>,
# Creation Date : October 2017
#
#
#
#=============================================================================
#
# PRODUCT : ProtocolDetector
#
# MODULE :
#
# ROLE : identification of protocols using Yara rules
#
# DEPENDANCE SYS. : yara
#
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
#
#=============================================================================
#
import dpkt
import pcapy
import yara
import os
import socket
from IPy import IP
def check_yara(rules, buf):
    """Match *buf* against compiled yara *rules*.

    Returns the list of matching rule names — always a list, never None.
    """
    try:
        matches = rules.match(data=buf)
        return [match.rule for match in matches]
    except TypeError:
        # yara's match() raises TypeError for non-buffer payloads (e.g. None).
        # BUGFIX: previously this fell through and returned None, which
        # crashed callers doing len(check_yara(...)) in detect_protocol.
        return []
def detect_protocol(buf, options):
    # Parse one Ethernet frame and classify its TCP/UDP payload with the yara
    # rules in options['rules']. Returns a dict with 'protocols', 'dport',
    # 'sport', 'src', 'dst', or None when the frame is unparsable, ICMP, or
    # filtered out as local traffic.
    rules = options['rules']
    ptype = None
    data_buf = None
    dport = None
    sport = None
    try:
        eth = dpkt.ethernet.Ethernet(buf)
        ip=eth.data
        # ICMP carries no application payload worth matching.
        if type(ip.data) == dpkt.icmp.ICMP:
            return
        if type(ip.data) == dpkt.tcp.TCP:
            ptype = 'tcp'
            tcp=ip.data
            data_buf = tcp.data
            dport = tcp.dport
            sport = tcp.sport
        elif type(ip.data) == dpkt.udp.UDP:
            ptype = 'udp'
            udp=ip.data
            data_buf = udp.data
            dport = udp.dport
            sport = udp.sport
        matches = check_yara(rules, data_buf)
        try:
            src_ip = socket.inet_ntoa(ip.src)
            dst_ip = socket.inet_ntoa(ip.dst)
        except socket.error:
            return None
        if len(matches)<1:
            # No rule matched: fall back to reporting the bare transport
            # protocol ('tcp'/'udp') instead of dropping the packet.
            #return None
            matches.append(ptype)
        if not options['socks_proxy'] and options['remove_local']:
            # NOTE: `ip` is deliberately rebound here from the dpkt packet to
            # an IPy.IP object; the dpkt value is no longer needed below.
            ip = IP(dst_ip)
            if ip.iptype() == 'PRIVATE':
                return None
        return { 'protocols' : matches, 'dport': dport, 'sport': sport, 'src': src_ip, 'dst': dst_ip }
    except AttributeError:
        # Non-IP frames (e.g. ARP) have no .src/.dst/.data attributes.
        pass
    except dpkt.dpkt.NeedData:
        # Truncated frame: skip silently.
        pass
# FIXME: is not optimal parse everything all the time. We should handle sessions
def resolve_socks_proxy(sport, options):
    """Map a SOCKS-side source port back to the real destination endpoint.

    Re-scans the pcap for the first TCP/UDP packet whose dport equals
    *sport*; that packet's source is the endpoint behind the proxy.
    Returns {'dport', 'dst'} or None when no matching packet is found.
    FIXME: re-parsing the whole pcap per lookup is O(n) each call; handling
    sessions would be more optimal.
    """
    pcap_path = options['pcap_path']
    # BUGFIX: open in binary mode (pcap is binary data) and close the handle;
    # the original used text mode and leaked the file object.
    with open(pcap_path, 'rb') as pcap_file:
        pcap = dpkt.pcap.Reader(pcap_file)
        for ts, buf in pcap:
            try:
                eth = dpkt.ethernet.Ethernet(buf)
            except dpkt.dpkt.NeedData:
                continue
            ip = eth.data
            if type(ip.data) == dpkt.tcp.TCP or type(ip.data) == dpkt.udp.UDP:
                tcp = ip.data
                if tcp.dport == sport:
                    # IMPORTANT: this is not a bug — the packet's *source* is
                    # reported as the destination to recover the endpoint
                    # behind the proxy.
                    res = {'dport': tcp.sport, 'dst': socket.inet_ntoa(ip.src)}
                    if options['remove_local']:
                        if IP(res['dst']).iptype() == 'PRIVATE':
                            continue
                    return res
def perform_check(buf, options):
    """Run protocol detection for one packet, resolving SOCKS endpoints.

    Args:
        buf: raw ethernet frame bytes.
        options: dict with 'rules', 'socks_proxy', 'pcap_path' and
            'remove_local' keys (see detect_protocol / resolve_socks_proxy).

    Returns:
        The protocol-details dict (possibly with dport/dst rewritten to the
        real SOCKS destination), or None when the packet is uninteresting or
        the proxy session cannot be resolved.
    """
    # (removed dead locals 'rules' and 'pcap_path' from the original;
    #  both helpers read them from *options* themselves)
    protocol_details = detect_protocol(buf, options)
    if protocol_details is None:
        return None
    if options['socks_proxy']:
        try:
            socks_details = resolve_socks_proxy(protocol_details['sport'], options)
            # resolve_socks_proxy returns None when no session matches; the
            # subscription below then raises TypeError, treated as "drop".
            protocol_details['dport'] = socks_details['dport']
            protocol_details['dst'] = socks_details['dst']
        except TypeError:
            return None
    return protocol_details
def get_rules():
    """Compile and return the bundled yara ruleset (rules/index.yar)."""
    # os.path.join handles separators portably (original concatenated strings)
    rules_path = os.path.join(os.path.dirname(__file__), 'rules', 'index.yar')
    return yara.compile(filepath=rules_path)
def analyze_pcap(options):
    """Scan every packet of a capture file and print matching results.

    Args:
        options: dict with at least 'pcap_path' plus the keys consumed by
            perform_check.
    """
    pcap_path = options['pcap_path']
    # Binary mode for dpkt, context manager to avoid leaking the handle
    # (the original opened in text mode and never closed the file).
    with open(pcap_path, 'rb') as pcap_file:
        try:
            pcap = dpkt.pcap.Reader(pcap_file)
        except dpkt.dpkt.NeedData:
            # not a readable pcap header -- nothing to do
            return
        for ts, buf in pcap:
            results = perform_check(buf, options)
            if results is not None:
                print(results)
def analyze_interface(options):
    """Live-capture packets from a network interface and check each one.

    Args:
        options: dict with 'iface' plus the keys consumed by perform_check.
    """
    iface = options['iface']
    cap = pcapy.open_live(iface, 100000, 1, 0)
    (header, payload) = cap.next()
    # i need to know whether it is a tcp or a udp packet here!!!
    while header:
        # BUG FIX: the original converted the payload to bytes once, before
        # the loop, so every iteration re-checked the *first* packet.
        buf = str(payload)
        perform_check(buf, options)
        (header, payload) = cap.next()
| 26.027778 | 103 | 0.524653 |
c2eddce337241b09bbe13729db72bb8c232f1e63 | 3,340 | py | Python | simplemonitor/Alerters/telegram.py | danieleteti/simplemonitor | e2cb5c22dd72145035f2e68cd9ce90e77fd147c3 | [
"BSD-3-Clause"
] | null | null | null | simplemonitor/Alerters/telegram.py | danieleteti/simplemonitor | e2cb5c22dd72145035f2e68cd9ce90e77fd147c3 | [
"BSD-3-Clause"
] | null | null | null | simplemonitor/Alerters/telegram.py | danieleteti/simplemonitor | e2cb5c22dd72145035f2e68cd9ce90e77fd147c3 | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
from typing import cast
import requests
from ..Monitors.monitor import Monitor
from ..util import format_datetime
from .alerter import Alerter, register
@register
class TelegramAlerter(Alerter):
    """Send push notification via Telegram.

    Requires the 'token' (bot token) and 'chat_id' config options; supports
    catch-up alerts for failures that happened while out of hours.
    """
    type = "telegram"
    def __init__(self, config_options: dict) -> None:
        super().__init__(config_options)
        # Both options are mandatory; get_config_option enforces that.
        self.telegram_token = cast(
            str, self.get_config_option("token", required=True, allow_empty=False)
        )
        self.telegram_chatid = cast(
            str, self.get_config_option("chat_id", required=True, allow_empty=False)
        )
        self.support_catchup = True
    def send_telegram_notification(self, body: str) -> None:
        """Send a push notification.

        Raises:
            RuntimeError: when the Telegram API does not answer 200 OK.
        """
        r = requests.post(
            "https://api.telegram.org/bot{}/sendMessage".format(self.telegram_token),
            data={"chat_id": self.telegram_chatid, "text": body},
        )
        if not r.status_code == requests.codes.ok:
            raise RuntimeError("Unable to send telegram notification")
    def send_alert(self, name: str, monitor: Monitor) -> None:
        """Build up the content for the push notification."""
        # NOTE: local name 'type' shadows the builtin; it holds the alert
        # kind: "", "failure", "success" or "catchup".
        type = self.should_alert(monitor)
        (days, hours, minutes, seconds) = monitor.get_downtime()
        # 'host' is only interpolated into the catchup message below
        if monitor.is_remote():
            host = " on %s " % monitor.running_on
        else:
            host = " on host %s" % self.hostname
        body = ""
        if type == "":
            # nothing to alert about
            return
        elif type == "failure":
            body = """Monitor %s DOWN
Failed at: %s
Downtime: %d+%02d:%02d:%02d
Description: %s""" % (
                name,
                format_datetime(monitor.first_failure_time()),
                days,
                hours,
                minutes,
                seconds,
                monitor.describe(),
            )
            try:
                if monitor.recover_info != "":
                    body += "\nRecovery info: %s" % monitor.recover_info
            except AttributeError:
                # monitors without a recover_info attribute
                body += "\nNo recovery info available"
        elif type == "success":
            body = """Monitor %s UP
Originally failed at: %s
Downtime: %d+%02d:%02d:%02d
Description: %s""" % (
                name,
                format_datetime(monitor.first_failure_time()),
                days,
                hours,
                minutes,
                seconds,
                monitor.describe(),
            )
        elif type == "catchup":
            body = (
                "Monitor %s%s failed earlier while this alerter was out of hours.\nFailed at: %s\nDescription: %s"
                % (
                    name,
                    host,
                    format_datetime(monitor.first_failure_time()),
                    monitor.describe(),
                )
            )
        else:
            self.alerter_logger.error("Unknown alert type %s", type)
            return
        if not self.dry_run:
            try:
                self.send_telegram_notification(body)
            except Exception:
                # mark the alerter unavailable so callers can fail over
                self.alerter_logger.exception("Couldn't send push notification")
                self.available = False
        else:
            self.alerter_logger.info("dry_run: would send push notification: %s" % body)
| 30.09009 | 114 | 0.534731 |
242862b179e4636a0fe407e532968c01331ccb83 | 10,490 | py | Python | tasks.py | knowsuchagency/docker-compose-airflow | ab3fae08793bd304f0027fc9174544802fa2c3fa | [
"Apache-2.0"
] | 7 | 2019-02-23T03:32:12.000Z | 2022-02-14T00:46:09.000Z | tasks.py | knowsuchagency/docker-compose-airflow | ab3fae08793bd304f0027fc9174544802fa2c3fa | [
"Apache-2.0"
] | null | null | null | tasks.py | knowsuchagency/docker-compose-airflow | ab3fae08793bd304f0027fc9174544802fa2c3fa | [
"Apache-2.0"
] | 2 | 2021-09-09T15:39:00.000Z | 2021-12-30T10:16:49.000Z | from concurrent import futures
from functools import partial
from pprint import pprint
from pathlib import Path
import subprocess as sp
import itertools as it
import datetime as dt
import atexit
import os
import re
from importlib_resources import read_text
from invoke import task
from jinja2 import Template
shell = partial(sp.run, stdin=sp.PIPE, stdout=sp.PIPE, universal_newlines=True)
@task
def swarm_up(c):
    """
    Create a docker swarm on google cloud.

    Reads project/zone/machine sizing from the invoke config (c.config.gcp),
    creates the requested manager/worker VMs in parallel, then initialises the
    swarm on manager 0 and joins every other machine to it.
    """
    project = c.config.gcp.project
    zone = c.config.gcp.zone
    machine_type = c.config.gcp.machine_type
    managers = c.config.gcp.managers
    workers = c.config.gcp.workers
    # ("manager", 0), ("manager", 1), ..., ("worker", 0), ...
    # Manager 0 comes first; the join logic below relies on that ordering.
    machines_desired = tuple(
        it.chain(
            it.product(("manager",), range(managers)),
            it.product(("worker",), range(workers)),
        )
    )
    # Phase 1: create all VMs concurrently (docker-machine create is slow).
    with futures.ThreadPoolExecutor() as ex:
        for role, number in machines_desired:
            ex.submit(
                c.run,
                f"""
                docker-machine create \
                --driver google \
                --google-project {project} \
                --google-zone {zone} \
                --google-machine-type {machine_type} \
                --google-tags docker \
                swarm-{role}-{number}
                """,
                warn=True,
            )
    # Phase 2: init the swarm on manager 0, then join the rest.
    # manager_name / manager_ip are set in the first iteration and reused by
    # every subsequent join command.
    for role, number in machines_desired:
        machine_name = f"swarm-{role}-{number}"
        if role == "manager" and number == 0:
            manager_name = machine_name
            manager_ip = c.run(
                f"""
                gcloud compute instances describe \
                --project {project} \
                --zone {zone} \
                --format 'value(networkInterfaces[0].networkIP)' \
                {machine_name}
                """
            ).stdout.strip()
            c.run(
                f"""
                docker-machine ssh {machine_name} sudo docker swarm init \
                --advertise-addr {manager_ip}
                """,
                warn=True,
            )
        elif role == "manager":
            # additional managers join with the manager token
            manager_token = c.run(
                f"docker-machine ssh {manager_name} sudo docker swarm join-token manager | grep token |"
                + " awk '{ print $5 }'"
            ).stdout.strip()
            c.run(
                f"""
                docker-machine ssh {machine_name} sudo docker swarm join \
                --token {manager_token} \
                {manager_ip}:2377
                """,
                warn=True,
            )
        else:
            # workers join with the worker token
            worker_token = c.run(
                f"docker-machine ssh {manager_name}"
                + " sudo docker swarm join-token worker | grep token | awk '{ print $5 }'"
            ).stdout.strip()
            c.run(
                f"""
                docker-machine ssh {machine_name} sudo docker swarm join \
                --token {worker_token} \
                {manager_ip}:2377
                """,
                warn=True,
            )
@task
def swarm_down(c):
    """Take the swarm workers down.

    Lists docker machines and removes, in parallel, every one whose name
    starts with a swarm prefix.
    """
    configure_prod_or_local(c, prod=False)
    listing = c.run("docker-machine ls", hide=True).stdout.splitlines()[1:]
    swarm_prefixes = ("swarm-manager", "swarm-worker")
    with futures.ThreadPoolExecutor() as pool:
        for row in listing:
            if not any(row.startswith(prefix) for prefix in swarm_prefixes):
                continue
            machine_name = row.split()[0]
            pool.submit(c.run, f"docker-machine rm -f {machine_name}", warn=True)
@task
def rebuild(c):
    """Rebuild the images and push them to the remote repository."""
    build_steps = (
        ("docker-compose build", {}),
        ("docker-compose push", {"pty": True}),
    )
    for command, run_kwargs in build_steps:
        c.run(command, **run_kwargs)
@task(aliases=["up"])
def deploy(c, rebuild_=False, stack=False, prod=False, ngrok=False):
    """
    Deploy the airflow instance.
    Args:
        c: invoke context
        rebuild_: rebuild the images prior to deployment
        stack: use docker swarm mode
        prod: deploy to production
        ngrok: deploy locally, but expose to internet via ngrok
    """
    configure_prod_or_local(c, prod)
    if ngrok:
        # Local compose deployment tunnelled through ngrok; compose is torn
        # down automatically when this process exits.
        if rebuild_:
            rebuild(c)
        atexit.register(c.run, "docker-compose down")
        c.run("docker-compose up -d")
        c.run("ngrok http 8080", pty=True)
    elif prod or stack:
        # Swarm-mode stack deployment; prod always pushes fresh images first.
        if prod or rebuild_:
            rebuild(c)
        c.run(
            f"docker stack deploy -c docker-compose.yaml -c docker-compose.prod.yaml {c.config.stack_name}"
        )
    else:
        # Plain local docker-compose in the foreground.
        if rebuild_:
            rebuild(c)
        c.run(f"docker-compose up")
@task(aliases=["down"])
def undeploy(c, prod=False):
    """Tear down the code that's deployed."""
    configure_prod_or_local(c, prod)
    stack = c.config.stack_name
    c.run(f"docker stack remove {stack}")
@task
def status(c, prod=False):
    """View the status of the deployed services."""
    configure_prod_or_local(c, prod)
    # plain string: the command has no interpolated parts
    c.run("docker service ls")
@task(aliases=["add-ingress"])
def expose_thyself(
    c, name=None, rules="tcp:80,tcp:443", service="reverse-proxy"
):
    """Expose our app to the outside world.

    Creates a GCP firewall rule allowing ingress to the swarm machines.

    Args:
        c: invoke context
        name: firewall-rule name; defaults to "<stack_name>-ingress"
        rules: comma-separated protocol:port rules to allow
        service: service name used in the rule description
    """
    name = c.config.stack_name + "-ingress" if name is None else name
    c.run(
        f"""
        gcloud compute firewall-rules create '{name}' \
            --project={c.config.gcp.project} \
            --description='Allow ingress into our docker swarm for {service}' \
            --direction=INGRESS \
            --priority=1000 \
            --network=default \
            --action=ALLOW \
            --rules={rules} \
            --target-tags=docker-machine
        """
    )
@task(aliases=["rm-ingress"])
def unexpose_thyself(c, name=None):
    """Remove the ingress firewall rule created by ``expose_thyself``.

    Args:
        c: invoke context
        name: firewall-rule name; defaults to "<stack_name>-ingress"
    """
    # BUG FIX: the default must apply when *name* is None; the original
    # condition was inverted (it clobbered an explicitly supplied name and
    # passed None through when no name was given).
    name = c.config.stack_name + "-ingress" if name is None else name
    c.run(f"gcloud compute firewall-rules delete {name}")
@task
def get_swarm_machine_env(c, machine="swarm-manager-0"):
    """Return a swarm machine's docker env vars as a dict.

    Parses `docker-machine env <machine>` output lines of the form
    `export KEY="value"` into {KEY: value}.

    Args:
        c: invoke context (unused; kept for the task interface)
        machine: docker-machine name to query
    """
    result = {}
    output = shell(["docker-machine", "env", machine]).stdout
    for line in output.splitlines():
        if "=" not in line:
            continue
        # BUG FIX: split on the first '=' only -- values such as cert paths
        # may contain '=' characters, which made the bare split() raise.
        key, value = line.split("=", 1)
        *_, key = key.split()
        result[key] = value.strip('"')
    pprint(result)
    return result
def configure_prod_or_local(c, prod=False):
    """Configure the execution environment based on whether we're deploying locally or to prod.

    prod=True points docker at the remote swarm manager; prod=False scrubs
    any docker-machine variables from the local environment.
    """
    docker_vars = (
        "DOCKER_TLS_VERIFY",
        "DOCKER_HOST",
        "DOCKER_CERT_PATH",
        "DOCKER_MACHINE_NAME",
    )
    if prod:
        c.config.run.update({"env": get_swarm_machine_env(c)})
        return
    for var_name in docker_vars:
        if var_name in os.environ:
            os.environ.pop(var_name)
@task
def encrypt(
    c, source, key=None, destination=None, location=None, keyring=None
):
    """
    Encrypt a file using google cloud kms.
    Args:
        source: The source file to be encrypted
        key: The name of the key
        destination: The file that will be created by encryption
        location: gcp zone
        keyring: gcp kms keyring
    """
    # Fill in defaults lazily so the kms config is only consulted when needed.
    if destination is None:
        destination = source + ".enc"
    if location is None:
        location = c.config.gcp.kms.location
    if keyring is None:
        keyring = c.config.gcp.kms.keyring
    if key is None:
        key = c.config.gcp.kms.key
    c.run(
        f"gcloud kms encrypt --key={key} --location={location} --keyring={keyring} "
        f"--plaintext-file={source} --ciphertext-file={destination}"
    )
@task
def encrypt_files(c):
    """
    Encrypt files in invoke.yaml encrypt.files
    """
    for entry in c.config.encrypt.files:
        source_path = Path(entry).__fspath__()
        encrypt(c, source_path)
@task(aliases=["gen-cert"])
def create_certificate(c, days=365):
    """
    Generate a TLS certificate and key pair.
    Args:
        c: invoke context
        days: the number of days till your certificate expires
    """
    # BUG FIX: honour the *days* argument -- the original hard-coded 365 in
    # the command, silently ignoring the parameter.
    c.run(
        f"openssl req -x509 -nodes -days {days} -newkey rsa:2048 -keyout "
        f"reverse-proxy/key.key -out reverse-proxy/certificate.crt"
    )
@task
def new_dag(
    c,
    dag_id=None,
    owner=None,
    email=None,
    start_date=None,
    schedule_interval=None,
    force=False,
):
    """
    Render a new dag and put it in the dags folder.
    Args:
        c: invoke context
        dag_id: i.e. my_dag_v1_p3 (dag_name, version, priority[1-high, 2-med, 3-low])
        owner: you
        email: your email
        start_date: date in iso format
        schedule_interval: cron expression
        force: overwrite dag module if it exists
    """
    yesterday = dt.date.today() - dt.timedelta(days=1)
    # v - version
    # p - priority (1, 2, 3) == (high, medium, low)
    defaults = {
        "dag_id": "example_dag_v1_p3",
        "owner": "Stephan Fitzpatrick",
        "email": "knowsuchagency@gmail.com",
        "start_date": yesterday.isoformat(),
        "schedule_interval": "0 7 * * *",
    }
    template_text = read_text("flow_toolz.templates", "dag_template.py")
    template = Template(template_text)
    args = {}
    locals_ = locals()
    print(
        "rendering your new dag. please enter the following values:",
        end=os.linesep * 2,
    )
    # Prompt interactively for any value not supplied as an argument.
    for key, default_value in defaults.items():
        explicit_value = locals_[key]
        if explicit_value:
            args[key] = explicit_value
        else:
            value = input(f"{key} (default: {default_value}) -> ").strip()
            args[key] = value or defaults[key]
    rendered_text = template.render(**args)
    print()
    # Strip the "_vN_pM" suffix to get the module filename.
    # BUG FIX: re.IGNORECASE was being passed as re.sub's *count* positional
    # argument (flags is the 5th parameter); pass it as flags= explicitly.
    filename = re.sub(r"_v[^.]+", "", args["dag_id"], flags=re.IGNORECASE) + ".py"
    dag_path = Path("airflow", "dags", filename)
    if dag_path.exists() and not force:
        # BUG FIX: the message contained a literal placeholder instead of the
        # conflicting path.
        raise SystemExit(f"{dag_path} already exists. aborting")
    print(f"writing dag to: {dag_path}")
    dag_path.write_text(rendered_text + os.linesep)
@task
def connect(c):
    """Connect to airflow deployment."""
    ip_address = c.run("docker-machine ip swarm-manager-0").stdout
    url = f"http://{ip_address}"
    c.run(f"open {url}")
@task(aliases=["bootstrap"])
def generate_auth_files(c):
    """Generate ssl keys and other file stubs for authentication purposes."""
    create_certificate(c)
    # Touch an empty stub for every configured secret file that is missing.
    for filename in c.config.encrypt.files:
        stub = Path(filename)
        if stub.exists():
            continue
        stub.touch()
| 27.036082 | 107 | 0.573403 |
d4d08769ebd2f9be5103cf28918fa5bfce1d615b | 8,584 | py | Python | src/algorithms/a2c_algorithm/a2c/bipedal_walker_agent.py | mnguyen0226/rl_value_based_vs_value_policy_based | 63c49388996c9b6bbe1d9fb3d302147311c8ad93 | [
"MIT"
] | null | null | null | src/algorithms/a2c_algorithm/a2c/bipedal_walker_agent.py | mnguyen0226/rl_value_based_vs_value_policy_based | 63c49388996c9b6bbe1d9fb3d302147311c8ad93 | [
"MIT"
] | null | null | null | src/algorithms/a2c_algorithm/a2c/bipedal_walker_agent.py | mnguyen0226/rl_value_based_vs_value_policy_based | 63c49388996c9b6bbe1d9fb3d302147311c8ad93 | [
"MIT"
] | null | null | null | import torch
import math
import time
import multiprocessing
from algorithms.utils import to_device
import numpy as np
from algorithms.utils import Memory
# Reference: https://github.com/pythonlessons/Reinforcement_Learning/blob/master/BipedalWalker-v3_PPO/BipedalWalker-v3_PPO.py
# Reference: https://slm-lab.gitbook.io/slm-lab/development/algorithms/a2c
# Reference: https://github.com/Khrylx/PyTorch-RL/blob/master/core/agent.py
# Reference: https://github.com/kengz/SLM-Lab
class BipedalWalkerAgent:
    """Sample-collection agent that fans rollout work out over processes.

    NOTE: the method ``collect_samples`` shadows (and delegates to) the
    module-level ``collect_samples`` function defined below this class.
    """
    def __init__(
        self,
        env,
        policy,
        device,
        custom_reward=None,
        mean_action=False,
        render=False,
        running_state=None,
        num_threads=1,
    ):
        """Constructor of BipedalWalkerAgent class
        Args:
            env: Bipedal Walker environment
            policy: Policy network
            device: CPU or GPU
            custom_reward: customed rewards collected from Critic network. Defaults to None.
            mean_action: mean values of action array. Defaults to False.
            render: allowance to render. Defaults to False.
            running_state: running state. Defaults to None.
            num_threads: number of thread run concurrently. Defaults to 1.
        """
        self.env = env
        self.policy = policy
        self.device = device
        self.custom_reward = custom_reward
        self.mean_action = mean_action
        self.running_state = running_state
        self.render = render
        self.num_threads = num_threads
    def collect_samples(self, min_batch_size, render):
        """Collects batches of actions using parallel training if using GPU
        This is an override of collect_samples()
        Args:
            min_batch_size: size of mini batch
            render: render state set to False
        Returns:
            Batch of action and log
        """
        self.render = render  # set render
        t_start = time.time()  # record time
        # Policy must live on CPU while worker processes pickle/use it.
        to_device(torch.device("cpu"), self.policy)  # set cpu
        thread_batch_size = int(
            math.floor(min_batch_size / self.num_threads)
        )  # number of threads for batch size
        queue = (
            multiprocessing.Queue()
        )  # set parallel multi processing if able to run multicore
        workers = []  # number of worker
        # Spawn num_threads-1 workers; this process does the remaining share.
        for i in range(self.num_threads - 1):
            worker_args = (
                i + 1,
                queue,
                self.env,
                self.policy,
                self.custom_reward,
                self.mean_action,
                False,
                self.running_state,
                thread_batch_size,
            )
            workers.append(
                multiprocessing.Process(target=collect_samples, args=worker_args)
            )
        for worker in workers:
            worker.start()
        # recursive call collect_sample()
        memory, log = collect_samples(
            rand_init=0,
            queue=None,
            env=self.env,
            policy=self.policy,
            custom_reward=self.custom_reward,
            mean_action=self.mean_action,
            render=self.render,
            running_state=self.running_state,
            min_batch_size=thread_batch_size,
        )
        # Collect each worker's (memory, log) via the queue; rand_init-1 is
        # the worker's slot index.
        worker_logs = [None] * len(workers)
        worker_memories = [None] * len(workers)
        for _ in workers:
            rand_init, worker_memory, worker_log = queue.get()
            worker_memories[rand_init - 1] = worker_memory
            worker_logs[rand_init - 1] = worker_log
        for worker_memory in worker_memories:
            memory.append(worker_memory)
        batch = memory.sample()
        if self.num_threads > 1:
            log_list = [log] + worker_logs
            log = concat_log(log_list)
        # Move the policy back to the configured device for training.
        to_device(self.device, self.policy)
        t_end = time.time()
        # save sampling time, mean, min, max action values
        log["sample_time"] = t_end - t_start
        log["action_mean"] = np.mean(np.vstack(batch.action), axis=0)
        log["action_min"] = np.min(np.vstack(batch.action), axis=0)
        log["action_max"] = np.max(np.vstack(batch.action), axis=0)
        return batch, log
def collect_samples(
    rand_init,
    queue,
    env,
    policy,
    custom_reward,
    mean_action,
    render,
    running_state,
    min_batch_size,
):
    """Roll out full episodes until at least *min_batch_size* steps are taken.

    Args:
        rand_init: per-worker seed (also identifies the worker's queue slot).
        queue: multiprocessing queue for worker results, or None when running
            in the parent process.
        env: gym-style environment (reset/step/render).
        policy: policy network (select_action / callable for mean action).
        custom_reward: optional callable(state, action) from the critic.
        mean_action: if True, use the policy's mean action instead of sampling.
        render: whether to render each step.
        running_state: optional state normalizer callable.
        min_batch_size: minimum number of environment steps to collect.

    Returns:
        (memory, log) when queue is None; otherwise puts
        [rand_init, memory, log] on the queue and returns None.
    """
    # BUG FIX: the original called torch.randn(rand_init), which allocates a
    # throwaway tensor; seeding the RNG is what gives per-worker
    # reproducibility.
    torch.manual_seed(rand_init)
    log = dict()
    memory = Memory()
    num_steps = 0
    total_reward = 0
    min_reward = 1e6
    max_reward = -1e6
    total_c_reward = 0
    min_c_reward = 1e6
    max_c_reward = -1e6
    num_episodes = 0
    while num_steps < min_batch_size:  # execute while not exceed the min batch size
        state = env.reset()
        if running_state is not None:  # normalize state if a normalizer exists
            state = running_state(state)
        reward_episode = 0  # initialize reward
        for t in range(10000):  # cap episode length at 10000 steps
            state_var = torch.Tensor(state).unsqueeze(0)
            with torch.no_grad():
                if (
                    mean_action
                ):  # deterministic: take the policy network's mean action
                    action = policy(state_var)[0][0].numpy()
                else:
                    action = policy.select_action(state_var)[0].numpy()
            action = (
                int(action) if policy.is_disc_action else action.astype(np.float64)
            )  # convert action to int (discrete) or float array (continuous)
            next_state, reward, done, _ = env.step(
                action
            )  # take the action and observe the outcome
            reward_episode += reward  # collect reward
            if running_state is not None:
                next_state = running_state(next_state)
            if custom_reward is not None:
                reward = custom_reward(state, action)
                total_c_reward += reward
                min_c_reward = min(min_c_reward, reward)
                max_c_reward = max(max_c_reward, reward)
            mask = 0 if done else 1
            memory.push(state, action, mask, next_state, reward)
            if render:
                env.render()
            if done:
                break
            state = next_state
        # update statisitics
        num_steps += t + 1
        num_episodes += 1
        total_reward += reward_episode
        min_reward = min(min_reward, reward_episode)
        max_reward = max(max_reward, reward_episode)
    # log statistic
    log["num_steps"] = num_steps
    log["num_episodes"] = num_episodes
    log["total_reward"] = total_reward
    log["avg_reward"] = total_reward / num_episodes
    log["max_reward"] = max_reward
    log["min_reward"] = min_reward
    if (
        custom_reward is not None
    ):  # if there is custom reward produced by the critic network
        log["total_c_reward"] = total_c_reward
        log["avg_c_reward"] = total_c_reward / num_steps
        log["max_c_reward"] = max_c_reward
        log["min_c_reward"] = min_c_reward
    if queue is not None:
        queue.put([rand_init, memory, log])
    else:
        return memory, log
def concat_log(log_list):
    """Merge per-worker sampling logs into a single aggregate log.

    Args:
        log_list: list of log dicts produced by ``collect_samples``.

    Returns:
        A dict with summed totals, overall min/max, and recomputed averages;
        critic-reward fields are included only if the first log has them.
    """
    merged = dict()
    merged["total_reward"] = sum(entry["total_reward"] for entry in log_list)
    merged["num_episodes"] = sum(entry["num_episodes"] for entry in log_list)
    merged["num_steps"] = sum(entry["num_steps"] for entry in log_list)
    merged["avg_reward"] = merged["total_reward"] / merged["num_episodes"]
    merged["max_reward"] = max(entry["max_reward"] for entry in log_list)
    merged["min_reward"] = min(entry["min_reward"] for entry in log_list)
    if "total_c_reward" in log_list[0]:
        merged["total_c_reward"] = sum(entry["total_c_reward"] for entry in log_list)
        merged["avg_c_reward"] = merged["total_c_reward"] / merged["num_steps"]
        merged["max_c_reward"] = max(entry["max_c_reward"] for entry in log_list)
        merged["min_c_reward"] = min(entry["min_c_reward"] for entry in log_list)
    return merged
| 32.149813 | 125 | 0.596692 |
eb6cc55c566f1a2f5efc92ad36e74c79cf40334d | 4,180 | py | Python | CoGanh/pytorch/NNet.py | vietnguyen2000/Reversi | d920f574853f359d6f6e2c436bdd951b1759dcd7 | [
"MIT"
] | null | null | null | CoGanh/pytorch/NNet.py | vietnguyen2000/Reversi | d920f574853f359d6f6e2c436bdd951b1759dcd7 | [
"MIT"
] | 2 | 2021-07-18T13:18:00.000Z | 2021-07-18T13:19:35.000Z | CoGanh/pytorch/NNet.py | vietnguyen2000/Reversi | d920f574853f359d6f6e2c436bdd951b1759dcd7 | [
"MIT"
] | null | null | null | import os
import sys
import time
import numpy as np
from tqdm import tqdm
sys.path.append('../../')
from utils import *
from NeuralNet import NeuralNet
import torch
import torch.optim as optim
from .CoGanhNNet import CoGanhNNet as onnet
# Training hyper-parameters; dotdict gives attribute-style access (args.lr).
args = dotdict({
    'lr': 0.001,  # NOTE(review): never passed to optim.Adam below -- confirm intended
    'dropout': 0.3,
    'epochs': 10,
    'batch_size': 64,
    'cuda': torch.cuda.is_available(),
    'num_channels': 512,
})
class NNetWrapper(NeuralNet):
    """Training/inference wrapper around the CoGanh network.

    Handles mini-batching, GPU transfer, loss computation, and checkpoint
    (de)serialisation for the underlying ``onnet`` model.
    """

    def __init__(self, game):
        self.nnet = onnet(game, args)
        self.board_x, self.board_y = game.getBoardSize()
        self.action_size = game.getActionSize()
        if args.cuda:
            self.nnet.cuda()

    def train(self, examples):
        """
        examples: list of examples, each example is of form (board, pi, v)
        """
        # NOTE(review): args.lr is not passed here, so Adam runs with its
        # default learning rate -- confirm whether that is intended.
        optimizer = optim.Adam(self.nnet.parameters())
        for epoch in range(args.epochs):
            print('EPOCH ::: ' + str(epoch + 1))
            self.nnet.train()
            pi_losses = AverageMeter()
            v_losses = AverageMeter()
            batch_count = int(len(examples) / args.batch_size)
            t = tqdm(range(batch_count), desc='Training Net')
            for _ in t:
                # sample a random mini-batch (with replacement)
                sample_ids = np.random.randint(len(examples), size=args.batch_size)
                boards, pis, vs = list(zip(*[examples[i] for i in sample_ids]))
                boards = torch.FloatTensor(np.array(list(map(lambda board: board.astype(np.float64), boards))))
                target_pis = torch.FloatTensor(np.array(pis))
                target_vs = torch.FloatTensor(np.array(vs).astype(np.float64))
                # predict
                if args.cuda:
                    boards, target_pis, target_vs = boards.contiguous().cuda(), target_pis.contiguous().cuda(), target_vs.contiguous().cuda()
                # compute output
                out_pi, out_v = self.nnet(boards)
                l_pi = self.loss_pi(target_pis, out_pi)
                l_v = self.loss_v(target_vs, out_v)
                total_loss = l_pi + l_v
                # record loss
                pi_losses.update(l_pi.item(), boards.size(0))
                v_losses.update(l_v.item(), boards.size(0))
                t.set_postfix(Loss_pi=pi_losses, Loss_v=v_losses)
                # compute gradient and do SGD step
                optimizer.zero_grad()
                total_loss.backward()
                optimizer.step()

    def predict(self, board):
        """
        board: np array with board
        """
        # timing
        start = time.time()
        # preparing input
        board = torch.FloatTensor(board.astype(np.float64))
        if args.cuda: board = board.contiguous().cuda()
        board = board.view(1, self.board_x, self.board_y)
        self.nnet.eval()
        with torch.no_grad():
            pi, v = self.nnet(board)
        # print('PREDICTION TIME TAKEN : {0:03f}'.format(time.time()-start))
        return torch.exp(pi).data.cpu().numpy()[0], v.data.cpu().numpy()[0]

    def loss_pi(self, targets, outputs):
        # cross-entropy against the log-probabilities emitted by the net
        return -torch.sum(targets * outputs) / targets.size()[0]

    def loss_v(self, targets, outputs):
        # mean squared error on the value head
        return torch.sum((targets - outputs.view(-1)) ** 2) / targets.size()[0]

    def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
        filepath = os.path.join(folder, filename)
        if not os.path.exists(folder):
            print("Checkpoint Directory does not exist! Making directory {}".format(folder))
            os.mkdir(folder)
        else:
            print("Checkpoint Directory exists! ")
        torch.save({
            'state_dict': self.nnet.state_dict(),
        }, filepath)

    def load_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
        # https://github.com/pytorch/examples/blob/master/imagenet/main.py#L98
        filepath = os.path.join(folder, filename)
        if not os.path.exists(filepath):
            # BUG FIX: the original raised a bare string, which itself fails
            # with "TypeError: exceptions must derive from BaseException".
            raise FileNotFoundError("No model in path {}".format(filepath))
        map_location = None if args.cuda else 'cpu'
        checkpoint = torch.load(filepath, map_location=map_location)
        self.nnet.load_state_dict(checkpoint['state_dict'])
| 34.262295 | 141 | 0.586603 |
f04bcab66a3b1cd8218a459d28674499bbf0e175 | 160 | py | Python | yt/frontends/athena/api.py | Xarthisius/yt | 321643c3abff64a6f132d98d0747f3558f7552a3 | [
"BSD-3-Clause-Clear"
] | 360 | 2017-04-24T05:06:04.000Z | 2022-03-31T10:47:07.000Z | yt/frontends/athena/api.py | Xarthisius/yt | 321643c3abff64a6f132d98d0747f3558f7552a3 | [
"BSD-3-Clause-Clear"
] | 2,077 | 2017-04-20T20:36:07.000Z | 2022-03-31T16:39:43.000Z | yt/frontends/athena/api.py | stonnes/yt | aad3cfa3b4ebab7838352ab467275a27c26ff363 | [
"BSD-3-Clause-Clear"
] | 257 | 2017-04-19T20:52:28.000Z | 2022-03-29T12:23:52.000Z | from . import tests
from .data_structures import AthenaDataset, AthenaGrid, AthenaHierarchy
from .fields import AthenaFieldInfo
from .io import IOHandlerAthena
| 32 | 71 | 0.85 |
f336b3d3ebedfc8fb926a2206f15497b5ef28776 | 2,858 | py | Python | launch/obstacles.py | FaHoLo/Rocket_launch | 5c3c74ba7f6f8ab454bf875421c91a2d15ff16d8 | [
"MIT"
] | null | null | null | launch/obstacles.py | FaHoLo/Rocket_launch | 5c3c74ba7f6f8ab454bf875421c91a2d15ff16d8 | [
"MIT"
] | null | null | null | launch/obstacles.py | FaHoLo/Rocket_launch | 5c3c74ba7f6f8ab454bf875421c91a2d15ff16d8 | [
"MIT"
] | null | null | null | import asyncio
from utils import draw_frame
class Obstacle:
    """Axis-aligned rectangular obstacle on the game field."""

    def __init__(self, row, column, rows_size=1, columns_size=1, uid=None):
        self.row = row
        self.column = column
        self.rows_size = rows_size
        self.columns_size = columns_size
        self.uid = uid

    def get_bounding_box_frame(self):
        # Box is one cell larger than the obstacle to compensate its movement.
        padded_rows = self.rows_size + 1
        padded_columns = self.columns_size + 1
        return '\n'.join(_get_bounding_box_lines(padded_rows, padded_columns))

    def get_bounding_box_corner_pos(self):
        """Top-left corner of the (padded) bounding box."""
        return self.row - 1, self.column - 1

    def dump_bounding_box(self):
        """Return (corner_row, corner_column, frame) for drawing."""
        corner_row, corner_column = self.get_bounding_box_corner_pos()
        return corner_row, corner_column, self.get_bounding_box_frame()

    def has_collision(self, obj_corner_row, obj_corner_column, obj_size_rows=1, obj_size_columns=1):
        '''Determine if collision has occured. Return True or False.'''
        return has_collision(
            (self.row, self.column),
            (self.rows_size, self.columns_size),
            (obj_corner_row, obj_corner_column),
            (obj_size_rows, obj_size_columns),
        )
def _get_bounding_box_lines(rows, columns):
yield ' ' + '-' * columns + ' '
for _ in range(rows):
yield '|' + ' ' * columns + '|'
yield ' ' + '-' * columns + ' '
async def show_obstacles(canvas, obstacles):
    """Display bounding boxes of every obstacle in a list"""
    while True:
        # Snapshot boxes once per frame so draw and erase use the same data.
        boxes = [obstacle.dump_bounding_box() for obstacle in obstacles]
        for row, column, frame in boxes:
            draw_frame(canvas, row, column, frame)
        await asyncio.sleep(0)
        for row, column, frame in boxes:
            draw_frame(canvas, row, column, frame, negative=True)
def _is_point_inside(corner_row, corner_column, size_rows, size_columns, point_row,
point_row_column):
rows_flag = corner_row <= point_row < corner_row + size_rows
columns_flag = corner_column <= point_row_column < corner_column + size_columns
return rows_flag and columns_flag
def has_collision(obstacle_corner, obstacle_size, obj_corner, obj_size=(1, 1)):
    '''Determine if collision has occured. Return True or False.'''
    # Far (bottom-right) corners of both rectangles.
    obstacle_far = (
        obstacle_corner[0] + obstacle_size[0] - 1,
        obstacle_corner[1] + obstacle_size[1] - 1,
    )
    obj_far = (
        obj_corner[0] + obj_size[0] - 1,
        obj_corner[1] + obj_size[1] - 1,
    )
    # Two rectangles overlap when either one contains a corner of the other.
    checks = (
        _is_point_inside(*obstacle_corner, *obstacle_size, *obj_corner),
        _is_point_inside(*obstacle_corner, *obstacle_size, *obj_far),
        _is_point_inside(*obj_corner, *obj_size, *obstacle_corner),
        _is_point_inside(*obj_corner, *obj_size, *obstacle_far),
    )
    return any(checks)
| 31.406593 | 100 | 0.653604 |
da9d20770908712b38c3e472be0ce8609dd0f48b | 746 | py | Python | framework/dataset.py | Jincheng-Sun/Kylearn | ba6cd80c97e155d4bb4791b3ae74135bc1365064 | [
"Apache-2.0"
] | null | null | null | framework/dataset.py | Jincheng-Sun/Kylearn | ba6cd80c97e155d4bb4791b3ae74135bc1365064 | [
"Apache-2.0"
] | null | null | null | framework/dataset.py | Jincheng-Sun/Kylearn | ba6cd80c97e155d4bb4791b3ae74135bc1365064 | [
"Apache-2.0"
] | 3 | 2019-07-07T06:37:52.000Z | 2019-09-10T20:32:34.000Z | import numpy as np
from utils.mini_batch import random_index
class Dataset:
    """Container for train/eval/test splits with mini-batch generators."""

    def __init__(self, **kwargs):
        # Splits are expected to be populated by subclasses / loaders.
        self.train_set = None
        self.eval_set = None
        self.test_set = None

    def evaluation_generator(self, batch_size=100):
        """Return a zero-argument callable yielding sequential eval batches.

        Args:
            batch_size: number of samples per yielded batch.
        """
        # BUG FIX: removed leftover debug print() calls (dataset length and
        # batch index) that polluted stdout on every evaluation pass.
        def generate():
            for idx in range(0, len(self.eval_set), batch_size):
                yield self.eval_set[idx:(idx + batch_size)]
        return generate

    def training_generator(self, batch_size=100, random=np.random):
        """Yield shuffled mini-batches of the training set.

        Args:
            batch_size: samples per batch; must be positive.
            random: random source handed to ``random_index`` for shuffling.
        """
        assert batch_size > 0 and len(self.train_set) > 0
        for batch_idxs in random_index(len(self.train_set), batch_size, random):
            yield self.train_set[batch_idxs]
15ee06ae5d68c6219144e8165502ffb0d7a03a0a | 882 | py | Python | blockchain_base/readFAT.py | benevolentPreta/49900 | 77d6d36fca17876e588b7ab0858c334dc5f871de | [
"BSD-2-Clause"
] | null | null | null | blockchain_base/readFAT.py | benevolentPreta/49900 | 77d6d36fca17876e588b7ab0858c334dc5f871de | [
"BSD-2-Clause"
] | null | null | null | blockchain_base/readFAT.py | benevolentPreta/49900 | 77d6d36fca17876e588b7ab0858c334dc5f871de | [
"BSD-2-Clause"
] | null | null | null | from blockchain import Blockchain
local_blockchain = Blockchain()
# Note 'fat.txt' must be located in the same directory as readFAT.py
def add_block_from_file():
with open('fat.txt', 'r') as FAT:
data = FAT.read()
local_blockchain.add_block(data)
# save blockchain to file
def saveBlockchain():
file = open('blockchain.txt', 'w')
for i in range(len(local_blockchain.chain)):
current_block = local_blockchain.chain[i]
file.write(str(current_block.time_stamp))
file.write('\n')
file.write(str(current_block.transactions))
file.write('\n')
file.write(str(current_block.generate_hash))
file.write('\n')
file.write(str(current_block.previous_hash))
file.write('\n===\n')
# print blockchain after adding new transaction
local_blockchain.print_blocks()
| 29.4 | 69 | 0.653061 |
8687be2904ec8ef1773b10209d9089232c101491 | 894 | py | Python | pip_http_cfb.py | YulongWu/my-utils | 37dff5abd6e27b69ee04ff8c194fcf2fb5178360 | [
"MIT"
] | null | null | null | pip_http_cfb.py | YulongWu/my-utils | 37dff5abd6e27b69ee04ff8c194fcf2fb5178360 | [
"MIT"
] | null | null | null | pip_http_cfb.py | YulongWu/my-utils | 37dff5abd6e27b69ee04ff8c194fcf2fb5178360 | [
"MIT"
] | null | null | null | #coding: u8
import sys
reload(sys)
sys.setdefaultencoding('u8')
from sets import Set
import urllib
import urllib2
import json
# call format:
ids = Set()
# call format: cat data_evaluate/top500.v2.type_heshe.docid.txt | python ~/gitsvnlib/YulongUtils/http_dataFetcher.py > data_evaluate/top500.v2.type_heshe.docid.result.json
if __name__ == "__main__":
serving_url = "http://10.111.0.56:8029/clickfeedback/neo" #url of cfb info
for linecount, line in enumerate(sys.stdin):
if linecount % 10000 == 0:
print >> sys.stderr, "Processed {0} lines...".format(linecount)
id = line.strip()
try:
req_url = serving_url + '?' + urllib.urlencode({'docids':id})
print urllib2.urlopen(req_url).read(),
except Exception, e:
print >> sys.stderr, "Error occured for docid: " + id
print >> sys.stderr, e
| 28.83871 | 171 | 0.64877 |
a3daf966398100f2fad450bb08cece866fa135f3 | 3,026 | py | Python | pype/plugins/maya/publish/extract_animation.py | tokejepsen/pype | 8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3 | [
"MIT"
] | null | null | null | pype/plugins/maya/publish/extract_animation.py | tokejepsen/pype | 8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3 | [
"MIT"
] | null | null | null | pype/plugins/maya/publish/extract_animation.py | tokejepsen/pype | 8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3 | [
"MIT"
] | null | null | null | import os
from maya import cmds
import avalon.maya
import pype.api
from pype.maya.lib import extract_alembic
class ExtractAnimation(pype.api.Extractor):
    """Produce an alembic of just point positions and normals.

    Positions and normals, uvs, creases are preserved, but nothing more,
    for plain and predictable point caches.
    """

    label = "Extract Animation"
    hosts = ["maya"]
    families = ["animation"]

    def process(self, instance):
        """Export the instance's out_SET hierarchy to an Alembic file.

        Parameters
        ----------
        instance : pyblish Instance
            Must contain exactly one node name ending in "out_SET"; frame
            range is read from instance.data.

        Raises
        ------
        RuntimeError
            If zero or more than one out_SET is found.
        """
        # Collect the out set nodes
        out_sets = [node for node in instance if node.endswith("out_SET")]
        if len(out_sets) != 1:
            raise RuntimeError("Couldn't find exactly one out_SET: "
                               "{0}".format(out_sets))
        out_set = out_sets[0]
        roots = cmds.sets(out_set, query=True)

        # Include all descendants.
        # BUGFIX: the original `roots + cmds.listRelatives(...) or []` raised
        # a TypeError when listRelatives returned None (concatenation happens
        # before `or`); parenthesize so None falls back to an empty list.
        nodes = roots + (cmds.listRelatives(roots,
                                            allDescendents=True,
                                            fullPath=True) or [])

        # Collect the start and end including handles
        start = instance.data["frameStart"]
        end = instance.data["frameEnd"]
        handles = instance.data.get("handles", 0)
        if handles:
            start -= handles
            end += handles

        self.log.info("Extracting animation..")

        # BUGFIX: staging_dir() was called twice for the same instance;
        # compute it once and reuse it for the export path and representation.
        staging_dir = self.staging_dir(instance)
        filename = "{name}.abc".format(**instance.data)
        path = os.path.join(staging_dir, filename)

        options = {
            "step": instance.data.get("step", 1.0),
            "attr": ["cbId"],
            "writeVisibility": True,
            "writeCreases": True,
            "uvWrite": True,
            "selection": True,
            "worldSpace": instance.data.get("worldSpace", True)
        }

        if not instance.data.get("includeParentHierarchy", True):
            # Set the root nodes if we don't want to include parents
            # The roots are to be considered the ones that are the actual
            # direct members of the set
            options["root"] = roots

        if int(cmds.about(version=True)) >= 2017:
            # Since Maya 2017 alembic supports multiple uv sets - write them.
            options["writeUVSets"] = True

        with avalon.maya.suspended_refresh():
            with avalon.maya.maintained_selection():
                cmds.select(nodes, noExpand=True)
                extract_alembic(file=path,
                                startFrame=start,
                                endFrame=end,
                                **options)

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'abc',
            'ext': 'abc',
            'files': filename,
            "stagingDir": staging_dir,
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted {} to {}".format(instance, staging_dir))
| 32.891304 | 77 | 0.555849 |
796e30e65a74232dd879a0e86b919b3c9324935c | 11,119 | py | Python | security_monkey/tests/auditors/test_elasticsearch_service.py | cncoder/security_monkey | 7d14c00e6c18a0edf87830ff4191007b6296b945 | [
"Apache-2.0"
] | null | null | null | security_monkey/tests/auditors/test_elasticsearch_service.py | cncoder/security_monkey | 7d14c00e6c18a0edf87830ff4191007b6296b945 | [
"Apache-2.0"
] | null | null | null | security_monkey/tests/auditors/test_elasticsearch_service.py | cncoder/security_monkey | 7d14c00e6c18a0edf87830ff4191007b6296b945 | [
"Apache-2.0"
] | 1 | 2019-06-15T14:03:34.000Z | 2019-06-15T14:03:34.000Z | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.tests.auditors.test_elasticsearch_service
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Mike Grima <mgrima@netflix.com>
"""
import json
from security_monkey.datastore import NetworkWhitelistEntry, Account, AccountType
from security_monkey.tests import SecurityMonkeyTestCase
from security_monkey import db
# TODO: Make a ES test for spulec/moto, then make test cases that use it.
from security_monkey.watchers.elasticsearch_service import ElasticSearchServiceItem
CONFIG_ONE = {
"name": "es_test",
"policy": json.loads(b"""{
"Statement": [
{
"Action": "es:*",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Resource": "arn:aws-cn:es:cn-north-1:012345678910:domain/es_test/*",
"Sid": ""
}
],
"Version": "2012-10-17"
}
""")
}
CONFIG_TWO = {
"name": "es_test_2",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:*",
"Resource": "arn:aws-cn:es:us-west-2:012345678910:domain/es_test_2/*"
}
]
}
""")
}
CONFIG_THREE = {
"name": "es_test_3",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws-cn:iam::012345678910:root"
},
"Action": "es:*",
"Resource": "arn:aws-cn:es:eu-west-1:012345678910:domain/es_test_3/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:ESHttp*",
"Resource": "arn:aws-cn:es:eu-west-1:012345678910:domain/es_test_3/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"192.168.1.1/32",
"10.0.0.1/8"
]
}
}
}
]
}
""")
}
CONFIG_FOUR = {
"name": "es_test_4",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws-cn:iam::012345678910:root"
},
"Action": "es:*",
"Resource": "arn:aws-cn:es:cn-north-1:012345678910:domain/es_test_4/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:ESHttp*",
"Resource": "arn:aws-cn:es:cn-north-1:012345678910:domain/es_test_4/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"0.0.0.0/0"
]
}
}
}
]
}
""")
}
CONFIG_FIVE = {
"name": "es_test_5",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws-cn:iam::012345678910:root"
},
"Action": "es:*",
"Resource": "arn:aws-cn:es:cn-north-1:012345678910:domain/es_test_5/*"
},
{
"Sid": "",
"Effect": "Deny",
"Principal": {
"AWS": "arn:aws-cn:iam::012345678910:role/not_this_role"
},
"Action": "es:*",
"Resource": "arn:aws-cn:es:cn-north-1:012345678910:domain/es_test_5/*"
}
]
}
""")
}
CONFIG_SIX = {
"name": "es_test_6",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws-cn:iam::012345678910:role/a_good_role"
},
"Action": "es:*",
"Resource": "arn:aws-cn:es:eu-west-1:012345678910:domain/es_test_6/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:ESHttp*",
"Resource": "arn:aws-cn:es:eu-west-1:012345678910:domain/es_test_6/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"192.168.1.1/32",
"100.0.0.1/16"
]
}
}
}
]
}
""")
}
CONFIG_SEVEN = {
"name": "es_test_7",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws-cn:iam::012345678910:role/a_good_role"
},
"Action": "es:*",
"Resource": "arn:aws-cn:es:eu-west-1:012345678910:domain/es_test_7/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:ESHttp*",
"Resource": "arn:aws-cn:es:eu-west-1:012345678910:domain/es_test_7/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"192.168.1.200/32",
"10.0.0.1/8"
]
}
}
}
]
}
""")
}
CONFIG_EIGHT = {
"name": "es_test_8",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "es:*",
"Resource": "arn:aws-cn:es:eu-west-1:012345678910:domain/es_test_8/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:ESHttp*",
"Resource": "arn:aws-cn:es:eu-west-1:012345678910:domain/es_test_8/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"192.168.1.1/32",
"100.0.0.1/16"
]
}
}
}
]
}
""")
}
CONFIG_NINE = {
"name": "es_test_9",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws-cn:iam::111111111111:root"
},
"Action": "es:*",
"Resource": "arn:aws-cn:es:cn-north-1:012345678910:domain/es_test_9/*"
}
]
}
""")
}
WHITELIST_CIDRS = [
("Test one", "192.168.1.1/32"),
("Test two", "100.0.0.1/16"),
]
class ElasticSearchServiceTestCase(SecurityMonkeyTestCase):
    """Runs ElasticSearchServiceAuditor over the CONFIG_* access policies and
    checks the number and score of the issues raised for each domain."""

    def pre_test_setup(self):
        """Build one ES item per CONFIG_* policy and seed the account tables."""
        self.es_items = [
            ElasticSearchServiceItem(region="cn-north-1", account="TEST_ACCOUNT", name="es_test", config=CONFIG_ONE),
            ElasticSearchServiceItem(region="us-west-2", account="TEST_ACCOUNT", name="es_test_2", config=CONFIG_TWO),
            ElasticSearchServiceItem(region="eu-west-1", account="TEST_ACCOUNT", name="es_test_3", config=CONFIG_THREE),
            ElasticSearchServiceItem(region="cn-north-1", account="TEST_ACCOUNT", name="es_test_4", config=CONFIG_FOUR),
            ElasticSearchServiceItem(region="cn-north-1", account="TEST_ACCOUNT", name="es_test_5", config=CONFIG_FIVE),
            ElasticSearchServiceItem(region="eu-west-1", account="TEST_ACCOUNT", name="es_test_6", config=CONFIG_SIX),
            ElasticSearchServiceItem(region="eu-west-1", account="TEST_ACCOUNT", name="es_test_7", config=CONFIG_SEVEN),
            ElasticSearchServiceItem(region="eu-west-1", account="TEST_ACCOUNT", name="es_test_8", config=CONFIG_EIGHT),
            ElasticSearchServiceItem(region="cn-north-1", account="TEST_ACCOUNT", name="es_test_9", config=CONFIG_NINE),
        ]

        account_type_result = AccountType(name='AWS')
        db.session.add(account_type_result)
        db.session.commit()

        account = Account(identifier="012345678910", name="TEST_ACCOUNT",
                          account_type_id=account_type_result.id, notes="TEST_ACCOUNT",
                          third_party=False, active=True)
        db.session.add(account)
        db.session.commit()

    def test_es_auditor(self):
        """Audit every item and verify per-config issue counts and scores."""
        from security_monkey.auditors.elasticsearch_service import ElasticSearchServiceAuditor
        es_auditor = ElasticSearchServiceAuditor(accounts=["012345678910"])

        # Add some test network whitelists into this:
        es_auditor.network_whitelist = []
        for cidr in WHITELIST_CIDRS:
            whitelist_cidr = NetworkWhitelistEntry()
            whitelist_cidr.cidr = cidr[1]
            whitelist_cidr.name = cidr[0]
            es_auditor.network_whitelist.append(whitelist_cidr)

        for es_domain in self.es_items:
            es_auditor.check_es_access_policy(es_domain)

        # Check for correct number of issues located:
        # FIX: replaced the deprecated assertEquals alias (removed in
        # Python 3.12) with assertEqual throughout.
        # CONFIG ONE:
        self.assertEqual(len(self.es_items[0].audit_issues), 1)
        self.assertEqual(self.es_items[0].audit_issues[0].score, 20)

        # CONFIG TWO:
        self.assertEqual(len(self.es_items[1].audit_issues), 1)
        self.assertEqual(self.es_items[1].audit_issues[0].score, 20)

        # CONFIG THREE:
        self.assertEqual(len(self.es_items[2].audit_issues), 2)
        self.assertEqual(self.es_items[2].audit_issues[0].score, 5)
        self.assertEqual(self.es_items[2].audit_issues[1].score, 7)

        # CONFIG FOUR:
        self.assertEqual(len(self.es_items[3].audit_issues), 1)
        self.assertEqual(self.es_items[3].audit_issues[0].score, 20)

        # CONFIG FIVE:
        self.assertEqual(len(self.es_items[4].audit_issues), 0)

        # CONFIG SIX:
        self.assertEqual(len(self.es_items[5].audit_issues), 0)

        # CONFIG SEVEN:
        self.assertEqual(len(self.es_items[6].audit_issues), 3)
        self.assertEqual(self.es_items[6].audit_issues[0].score, 5)
        self.assertEqual(self.es_items[6].audit_issues[1].score, 5)
        self.assertEqual(self.es_items[6].audit_issues[2].score, 7)

        # CONFIG EIGHT:
        self.assertEqual(len(self.es_items[7].audit_issues), 1)
        self.assertEqual(self.es_items[7].audit_issues[0].score, 20)

        # CONFIG NINE:
        self.assertEqual(len(self.es_items[8].audit_issues), 2)
        self.assertEqual(self.es_items[8].audit_issues[0].score, 6)
        self.assertEqual(self.es_items[8].audit_issues[1].score, 10)
3b62c7e55ca3c87ae0941699748e5ebc837b4f65 | 728 | py | Python | emit_logs.py | it490-wizards/logging-system | 19ba89315f1a4088adc595ce27687dc8688a8926 | [
"MIT"
] | null | null | null | emit_logs.py | it490-wizards/logging-system | 19ba89315f1a4088adc595ce27687dc8688a8926 | [
"MIT"
] | 1 | 2021-11-04T16:28:08.000Z | 2021-11-04T16:28:08.000Z | emit_logs.py | it490-wizards/logging-system | 19ba89315f1a4088adc595ce27687dc8688a8926 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
import dotenv
import pika
dotenv.load_dotenv()
connection = pika.BlockingConnection(
pika.ConnectionParameters(
host=os.getenv("PIKA_HOST"),
port=os.getenv("PIKA_PORT"),
virtual_host=os.getenv("PIKA_VIRTUAL_HOST"),
credentials=pika.PlainCredentials(
username=os.getenv("PIKA_USERNAME"),
password=os.getenv("PIKA_PASSWORD"),
),
)
)
channel = connection.channel()
channel.exchange_declare(exchange="logs", exchange_type="fanout")
message = " ".join(sys.argv[1:]) or "info: Hello World!"
channel.basic_publish(exchange="logs", routing_key="", body=message)
print(" [x] Sent %r" % message)
connection.close()
| 23.483871 | 68 | 0.681319 |
30759d17ffae0ed3731b72b481eeeacaa8c09464 | 378 | py | Python | TRANSFORM/Resources/Scripts/jModelica/Fluid/TraceComponents/Examples/TraceSeparator_Test.py | greenwoodms/TRANSFORM-Library | dc152d4f0298d3f18385f2ea33645d87d7812915 | [
"Apache-2.0"
] | 29 | 2018-04-24T17:06:19.000Z | 2021-11-21T05:17:28.000Z | TRANSFORM/Resources/Scripts/jModelica/Fluid/TraceComponents/Examples/TraceSeparator_Test.py | greenwoodms/TRANSFORM-Library | dc152d4f0298d3f18385f2ea33645d87d7812915 | [
"Apache-2.0"
] | 13 | 2018-04-05T08:34:27.000Z | 2021-10-04T14:24:41.000Z | TRANSFORM/Resources/Scripts/jModelica/Fluid/TraceComponents/Examples/TraceSeparator_Test.py | greenwoodms/TRANSFORM-Library | dc152d4f0298d3f18385f2ea33645d87d7812915 | [
"Apache-2.0"
] | 17 | 2018-08-06T22:18:01.000Z | 2022-01-29T21:38:17.000Z | from pymodelica import compile_fmu
from pyfmi import load_fmu
libPath = r'C:\Users\vmg\Documents\Modelica\TRANSFORM-Library/TRANSFORM'
modelName = 'TRANSFORM.Fluid.TraceComponents.Examples.TraceSeparator_Test'
fmu = compile_fmu(modelName,libPath,target='cs')
model = load_fmu(fmu)
opts = model.simulate_options()
opts['time_limit'] = 60
results=model.simulate(options=opts)
| 27 | 74 | 0.804233 |
7554ca373efa1f2003d3a5588e1d0731c8ac1a3d | 1,267 | py | Python | envisage/tests/eggs/acme.bar/acme/bar/bar_plugin.py | enthought/envisage | ca57225c4e9022d1ed5299a60e13dc2290d7d94e | [
"BSD-3-Clause"
] | 51 | 2015-05-12T01:34:15.000Z | 2022-03-20T19:11:22.000Z | envisage/tests/eggs/acme.bar/acme/bar/bar_plugin.py | enthought/envisage | ca57225c4e9022d1ed5299a60e13dc2290d7d94e | [
"BSD-3-Clause"
] | 347 | 2015-02-27T19:51:09.000Z | 2022-03-21T16:03:01.000Z | envisage/tests/eggs/acme.bar/acme/bar/bar_plugin.py | enthought/envisage | ca57225c4e9022d1ed5299a60e13dc2290d7d94e | [
"BSD-3-Clause"
] | 11 | 2015-02-11T04:32:54.000Z | 2021-09-13T10:50:05.000Z | # (C) Copyright 2007-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" The 'Bar' plugin """
# Enthought library imports.
from envisage.api import Plugin
from traits.api import Bool
class BarPlugin(Plugin):
    """ The 'Bar' plugin """

    #### 'IPlugin' interface ##################################################

    # The plugin's unique identifier.
    id = "acme.bar"

    #### 'BarPlugin' interface ################################################

    # Lifecycle flags, flipped by start()/stop() below.
    started = Bool(False)
    stopped = Bool(False)

    ###########################################################################
    # 'IPlugin' interface.
    ###########################################################################

    def start(self):
        """ Start the plugin. """

        self.stopped = False
        self.started = True

    def stop(self):
        """ Stop the plugin. """

        self.started = False
        self.stopped = True
af548069102b1e1044ae1af107914d13e27da094 | 7,538 | py | Python | pw_build/py/pw_build/generated_tests.py | curtin-space/pigweed | fe2e1743e03fabd2676f01d9de0ac9d34a426076 | [
"Apache-2.0"
] | 86 | 2021-03-09T23:49:40.000Z | 2022-03-30T08:14:51.000Z | pw_build/py/pw_build/generated_tests.py | curtin-space/pigweed | fe2e1743e03fabd2676f01d9de0ac9d34a426076 | [
"Apache-2.0"
] | 4 | 2021-07-27T20:32:03.000Z | 2022-03-08T10:39:07.000Z | pw_build/py/pw_build/generated_tests.py | curtin-space/pigweed | fe2e1743e03fabd2676f01d9de0ac9d34a426076 | [
"Apache-2.0"
] | 22 | 2021-03-11T15:15:47.000Z | 2022-02-09T06:16:36.000Z | # Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tools for generating Pigweed tests that execute in C++ and Python."""
import argparse
from dataclasses import dataclass
from datetime import datetime
from collections import defaultdict
import unittest
from typing import (Any, Callable, Dict, Generic, Iterable, Iterator, List,
Sequence, TextIO, TypeVar, Union)
_COPYRIGHT = f"""\
// Copyright {datetime.now().year} The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
// AUTOGENERATED - DO NOT EDIT
//
// Generated at {datetime.now().isoformat()}
"""
_HEADER_CPP = _COPYRIGHT + """\
// clang-format off
"""
_HEADER_JS = _COPYRIGHT + """\
/* eslint-env browser, jasmine */
"""
class Error(Exception):
    """Something went wrong when generating tests."""


# Type of a single test case; bound per TestGenerator instantiation.
T = TypeVar('T')
@dataclass
class Context(Generic[T]):
    """Info passed into test generator functions for each test case."""

    group: str
    count: int
    total: int
    test_case: T

    def cc_name(self) -> str:
        """Return a C++ test name: CamelCased group, non-alnum chars -> '_'."""
        words = self.group.replace('-', ' ').split(' ')
        camel = ''.join(word.capitalize() for word in words)
        sanitized = ''.join(char if char.isalnum() else '_' for char in camel)
        if self.total > 1:
            return f'{sanitized}_{self.count}'
        return sanitized

    def py_name(self) -> str:
        """Return a Python test method name: 'test_' + snake_cased group."""
        base = 'test_' + ''.join(
            char if char.isalnum() else '_' for char in self.group.lower())
        if self.total > 1:
            return f'{base}_{self.count}'
        return base

    def ts_name(self) -> str:
        """Return a TS test name: lowercased group, non-alnum chars -> ' '."""
        base = ''.join(
            char if char.isalnum() else ' ' for char in self.group.lower())
        if self.total > 1:
            return f'{base} {self.count}'
        return base
# Test cases are specified as a sequence of strings or test case instances.
# The strings are used to separate the tests into named groups. For example:
#
#   STR_SPLIT_TEST_CASES = (
#       'Empty input',
#       MyTestCase('', '', []),
#       MyTestCase('', 'foo', []),
#       'Split on single character',
#       MyTestCase('abcde', 'c', ['ab', 'de']),
#       ...
#   )
#
GroupOrTest = Union[str, T]

# Python tests are generated by a function that returns a function usable as a
# unittest.TestCase method.
PyTest = Callable[[unittest.TestCase], None]
PyTestGenerator = Callable[[Context[T]], PyTest]

# C++ tests are generated with a function that returns or yields lines of C++
# code for the given test case.
CcTestGenerator = Callable[[Context[T]], Iterable[str]]

# JS/TS tests are generated the same way: yield lines of code per test case.
JsTestGenerator = Callable[[Context[T]], Iterable[str]]
class TestGenerator(Generic[T]):
    """Generates tests for multiple languages from a series of test cases."""

    def __init__(self, test_cases: Sequence[GroupOrTest[T]]):
        # Maps group name -> list of test cases in that group.
        self._cases: Dict[str, List[T]] = defaultdict(list)

        if len(test_cases) < 2:
            raise Error('At least one test case must be provided')

        if not isinstance(test_cases[0], str):
            raise Error(
                'The first item in the test cases must be a group name string')

        current_group = ''
        for entry in test_cases:
            if isinstance(entry, str):
                current_group = entry
            else:
                self._cases[current_group].append(entry)

        if '' in self._cases:
            raise Error('Empty test group names are not permitted')

    def _test_contexts(self) -> Iterator[Context[T]]:
        """Yield a Context for every test case, numbered within its group."""
        for group_name, group_tests in self._cases.items():
            total = len(group_tests)
            for number, case in enumerate(group_tests, 1):
                yield Context(group_name, number, total, case)

    def _generate_python_tests(self, define_py_test: PyTestGenerator):
        """Build a {method_name: test_function} dict for all test cases."""
        tests: Dict[str, Callable[[Any], None]] = {}

        for ctx in self._test_contexts():
            test_fn = define_py_test(ctx)
            test_fn.__name__ = ctx.py_name()

            if test_fn.__name__ in tests:
                raise Error(
                    f'Multiple Python tests are named {test_fn.__name__}!')

            tests[test_fn.__name__] = test_fn

        return tests

    def python_tests(self, name: str, define_py_test: PyTestGenerator) -> type:
        """Returns a Python unittest.TestCase class with tests for each case."""
        members = self._generate_python_tests(define_py_test)
        return type(name, (unittest.TestCase, ), members)

    def _generate_cc_tests(self, define_cpp_test: CcTestGenerator, header: str,
                           footer: str) -> Iterator[str]:
        """Yield the lines of the generated C++ test file."""
        yield _HEADER_CPP
        yield header

        for ctx in self._test_contexts():
            yield from define_cpp_test(ctx)
            yield ''

        yield footer

    def cc_tests(self, output: TextIO, define_cpp_test: CcTestGenerator,
                 header: str, footer: str):
        """Writes C++ unit tests for each test case to the given file."""
        for line in self._generate_cc_tests(define_cpp_test, header, footer):
            print(line, file=output)

    def _generate_ts_tests(self, define_ts_test: JsTestGenerator, header: str,
                           footer: str) -> Iterator[str]:
        """Yield the lines of the generated JS/TS test file."""
        yield _HEADER_JS
        yield header

        for ctx in self._test_contexts():
            yield from define_ts_test(ctx)

        yield footer

    def ts_tests(self, output: TextIO, define_js_test: JsTestGenerator,
                 header: str, footer: str):
        """Writes JS unit tests for each test case to the given file."""
        for line in self._generate_ts_tests(define_js_test, header, footer):
            print(line, file=output)
def _to_chars(data: bytes) -> Iterator[str]:
for i, byte in enumerate(data):
try:
char = data[i:i + 1].decode()
yield char if char.isprintable() else fr'\x{byte:02x}'
except UnicodeDecodeError:
yield fr'\x{byte:02x}'
def cc_string(data: Union[str, bytes]) -> str:
    """Returns a C++ string literal version of a byte string or UTF-8 string."""
    raw = data.encode() if isinstance(data, str) else data
    return '"{}"'.format(''.join(_to_chars(raw)))
def parse_test_generation_args() -> argparse.Namespace:
    """Parse the test-generation flags, ignoring unrecognized arguments."""
    parser = argparse.ArgumentParser(description='Generate unit test files')
    for flag, language in (('--generate-cc-test', 'C++'),
                           ('--generate-ts-test', 'JS')):
        parser.add_argument(flag,
                            type=argparse.FileType('w'),
                            help=f'Generate the {language} test file')
    known_args, _ = parser.parse_known_args()
    return known_args
| 34.420091 | 80 | 0.632263 |
f3c4d7b2ed363bead210319e87549101af368edd | 18,628 | py | Python | tuun/probo/models/gpystan/gp_stan_matern.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 33 | 2020-08-30T16:22:35.000Z | 2022-02-26T13:48:32.000Z | tuun/probo/models/gpystan/gp_stan_matern.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 2 | 2021-01-18T19:46:43.000Z | 2021-03-24T09:59:14.000Z | tuun/probo/models/gpystan/gp_stan_matern.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 2 | 2020-08-25T17:02:15.000Z | 2021-04-21T16:40:44.000Z | """
Classes for GP models with Stan.
"""
from argparse import Namespace
import copy
import numpy as np
from .stan.gp import get_model as get_model_gp
from .stan.gp_fixedsig_matern import get_model as get_model_gp_fixedsig
from .gp.gp_utils import kern_matern, sample_mvn, gp_post
from .util.data_transform import DataTransformer
from .util.misc_util import dict_to_namespace, suppress_stdout_stderr
class StanGpMatern:
"""
Hierarchical GPs, implemented with Stan.
"""
    def __init__(self, params=None, verbose=True):
        """
        Parameters
        ----------
        params : Namespace_or_dict
            Namespace or dict of parameters for this model.
        verbose : bool
            If True, print description string.
        """
        # Order matters: set_verbose may print (via print_str) after params
        # are set, and set_model builds the Stan model chosen by
        # params.model_str.
        self.set_params(params)
        self.set_verbose(verbose)
        self.set_model()
def set_params(self, params):
"""Set self.params, the parameters for this model."""
params = dict_to_namespace(params)
# Set self.params
self.params = Namespace()
self.params.ndimx = params.ndimx
self.params.model_str = getattr(params, 'model_str', 'optfixedsig')
self.params.ig1 = getattr(params, 'ig1', 4.0)
self.params.ig2 = getattr(params, 'ig2', 3.0)
self.params.n1 = getattr(params, 'n1', 1.0)
self.params.n2 = getattr(params, 'n2', 1.0)
self.params.sigma = getattr(params, 'sigma', 1e-5)
self.params.niter = getattr(params, 'niter', 70)
self.params.kernel = getattr(params, 'kernel', kern_matern)
self.params.trans_x = getattr(params, 'trans_x', False)
def set_verbose(self, verbose):
"""Set verbose options."""
self.verbose = verbose
if self.verbose:
self.print_str()
    def set_model(self):
        """Set GP stan model."""
        # get_model compiles/fetches the Stan model matching
        # self.params.model_str (it returns None for 'fixedparam').
        self.model = self.get_model()
def get_model(self):
"""Returns GP stan model."""
if (
self.params.model_str == 'optfixedsig'
or self.params.model_str == 'sampfixedsig'
):
return get_model_gp_fixedsig(print_status=self.verbose)
elif self.params.model_str == 'opt' or self.params.model_str == 'samp':
return get_model_gp(print_status=self.verbose)
elif self.params.model_str == 'fixedparam':
return None
    def set_data(self, data):
        """Set self.data."""
        # Keep an untouched deep copy of the raw data; self.data holds the
        # transformed version used for inference.
        self.data_init = copy.deepcopy(data)
        self.data = copy.deepcopy(self.data_init)

        # Transform data.x
        self.data.x = self.transform_xin_list(self.data.x)

        # Transform data.y
        self.transform_data_y()
def transform_xin_list(self, xin_list):
"""Transform list of xin (e.g. in data.x)."""
# Ensure data.x is correct format (list of 1D numpy arrays)
xin_list = [np.array(xin).reshape(-1) for xin in xin_list]
if self.params.trans_x:
# apply transformation to xin_list
xin_list_trans = xin_list # TODO: define default transformation
else:
xin_list_trans = xin_list
return xin_list_trans
    def transform_data_y(self):
        """Transform data.y using DataTransformer."""
        # Fit a transformer on the current data, then replace data.y with the
        # transformed targets; data.x is carried over unchanged.
        self.dt = DataTransformer(self.data, False)
        y_trans = self.dt.transform_y_data()
        self.data = Namespace(x=self.data.x, y=y_trans)
    def inf(self, data):
        """Set data, run inference, update self.sample_list."""
        self.set_data(data)
        self.infer_post_and_update_samples()
    def post(self, s):
        """Return one posterior sample."""
        # NOTE(review): the seed argument s is unused here; the draw uses
        # numpy's global RNG state.
        return np.random.choice(self.sample_list)
    def gen_list(self, x_list, z, s, nsamp):
        """
        Draw nsamp samples from generative process, given list of inputs
        x_list, posterior sample z, and seed s.

        Parameters
        ----------
        x_list : list
            List of numpy ndarrays each with shape=(self.params.ndimx,)
        z : Namespace
            Namespace of GP hyperparameters.
        s : int
            The seed, a positive integer.
        nsamp : int
            The number of samples to draw from generative process.

        Returns
        -------
        list
            A list with len=len(x_list) of numpy ndarrays, each with
            shape=(nsamp,).
        """
        # NOTE(review): z and s are currently unused -- sample_gp_pred draws
        # its own hyperparameter sample and uses numpy's global RNG state.
        x_list = self.transform_xin_list(x_list)
        pred_list = self.sample_gp_pred(nsamp, x_list)
        # Undo the y-transform so predictions are on the original scale.
        pred_list = [self.dt.inv_transform_y_data(pr) for pr in pred_list]
        return pred_list
    def postgen_list(self, x_list, s, nsamp):
        """
        Draw nsamp samples from posterior predictive distribution, given list
        of inputs x_list and seed s.

        Parameters
        ----------
        x_list : list
            List of numpy ndarrays each with shape=(self.params.ndimx,).
        s : int
            The seed, a positive integer.
        nsamp : int
            The number of samples to draw from the posterior predictive
            distribution.

        Returns
        -------
        list
            A list with len=len(x_list) of numpy ndarrays, each with
            shape=(nsamp,).
        """
        # NOTE(review): s is currently unused; sampling uses numpy's global
        # RNG state. nloop caps how many hyperparameter draws are mixed in.
        x_list = self.transform_xin_list(x_list)
        pred_list = self.sample_gp_post_pred(
            nsamp, x_list, full_cov=True, nloop=np.min([50, nsamp])
        )
        # Undo the y-transform so predictions are on the original scale.
        pred_list = [self.dt.inv_transform_y_data(pr) for pr in pred_list]
        return pred_list
    def infer_post_and_update_samples(self, seed=543210, print_result=False):
        """Update self.sample_list."""
        data_dict = self.get_stan_data_dict()

        if self.params.model_str == 'optfixedsig' or self.params.model_str == 'opt':
            # MAP estimation: optimize the Stan model's log density.

            def run_stan_optimizing(stan_opt_str):
                # Stan is chatty; silence its stdout/stderr during the run.
                with suppress_stdout_stderr():
                    return self.model.optimizing(
                        data_dict,
                        iter=self.params.niter,
                        seed=seed,
                        as_vector=True,
                        algorithm=stan_opt_str,
                    )

            try:
                stanout = run_stan_optimizing('LBFGS')
            except RuntimeError:
                # LBFGS occasionally fails; fall back to the Newton optimizer.
                print(
                    '\t*Stan LBFGS optimizer failed. Running Newton '
                    + 'optimizer instead.'
                )
                stanout = run_stan_optimizing('Newton')

        elif self.params.model_str == 'samp' or self.params.model_str == 'sampfixedsig':
            # Full MCMC sampling (single chain), discarding nwarmup iterations.
            # NOTE(review): self.params.nwarmup is not set in set_params --
            # confirm it is provided for sampling model_str values.
            with suppress_stdout_stderr():
                stanout = self.model.sampling(
                    data_dict,
                    iter=self.params.niter + self.params.nwarmup,
                    warmup=self.params.nwarmup,
                    chains=1,
                    seed=seed,
                    refresh=1000,
                )

        elif self.params.model_str == 'fixedparam':
            # Fixed hyperparameters: no inference to run.
            stanout = None

        self.sample_list = self.get_sample_list_from_stan_out(stanout)

        if print_result:
            self.print_inference_result()
def get_stan_data_dict(self):
"""Return data dict for stan sampling method."""
if (
self.params.model_str == 'optfixedsig'
or self.params.model_str == 'sampfixedsig'
):
return {
'ig1': self.params.ig1,
'ig2': self.params.ig2,
'n1': self.params.n1,
'n2': self.params.n2,
'sigma': self.params.sigma,
'D': self.params.ndimx,
'N': len(self.data.x),
'x': self.data.x,
'y': self.data.y.flatten(),
'covid': 1,
}
elif self.params.model_str == 'opt' or self.params.model_str == 'samp':
return {
'ig1': self.params.ig1,
'ig2': self.params.ig2,
'n1': self.params.n1,
'n2': self.params.n2,
'n3': self.params.n3,
'n4': self.params.n4,
'D': self.params.ndimx,
'N': len(self.data.x),
'y': self.data.y.flatten(),
'x': self.data.x,
}
    def get_sample_list_from_stan_out(self, stanout):
        """Convert stan output to sample_list."""
        if self.params.model_str == 'optfixedsig':
            # Single point estimate; noise sigma was held fixed.
            return [
                Namespace(
                    ls=stanout['rho'], alpha=stanout['alpha'], sigma=self.params.sigma
                )
            ]
        elif self.params.model_str == 'opt':
            # Single point estimate including the optimized noise sigma.
            return [
                Namespace(
                    ls=stanout['rho'], alpha=stanout['alpha'], sigma=stanout['sigma']
                )
            ]
        elif self.params.model_str == 'sampfixedsig':
            # One Namespace per MCMC draw; sigma held fixed across draws.
            sdict = stanout.extract(['rho', 'alpha'])
            return [
                Namespace(
                    ls=sdict['rho'][i], alpha=sdict['alpha'][i], sigma=self.params.sigma
                )
                for i in range(sdict['rho'].shape[0])
            ]
        elif self.params.model_str == 'samp':
            # One Namespace per MCMC draw, including the sampled sigma.
            sdict = stanout.extract(['rho', 'alpha', 'sigma'])
            return [
                Namespace(
                    ls=sdict['rho'][i], alpha=sdict['alpha'][i], sigma=sdict['sigma'][i]
                )
                for i in range(sdict['rho'].shape[0])
            ]
        elif self.params.model_str == 'fixedparam':
            # Hyperparameters come straight from params; stanout is ignored.
            return [
                Namespace(
                    ls=self.params.ls, alpha=self.params.alpha, sigma=self.params.sigma
                )
            ]
    def print_inference_result(self):
        """Print results of stan inference."""
        if (
            self.params.model_str == 'optfixedsig'
            or self.params.model_str == 'opt'
            or self.params.model_str == 'fixedparam'
        ):
            # Point-estimate variants carry a single sample.
            print('*ls pt est = ' + str(self.sample_list[0].ls) + '.')
            print('*alpha pt est = ' + str(self.sample_list[0].alpha) + '.')
            print('*sigma pt est = ' + str(self.sample_list[0].sigma) + '.')
        elif self.params.model_str == 'samp' or self.params.model_str == 'sampfixedsig':
            # Sampling variants: report mean and std across the MCMC draws.
            ls_arr = np.array([ns.ls for ns in self.sample_list])
            alpha_arr = np.array([ns.alpha for ns in self.sample_list])
            sigma_arr = np.array([ns.sigma for ns in self.sample_list])
            print('*ls mean = ' + str(ls_arr.mean()) + '.')
            print('*ls std = ' + str(ls_arr.std()) + '.')
            print('*alpha mean = ' + str(alpha_arr.mean()) + '.')
            print('*alpha std = ' + str(alpha_arr.std()) + '.')
            print('*sigma mean = ' + str(sigma_arr.mean()) + '.')
            print('*sigma std = ' + str(sigma_arr.std()) + '.')
        print('-----')
    def sample_gp_pred(self, nsamp, input_list, lv=None):
        """
        Sample from GP predictive distribution given one posterior GP sample.

        Parameters
        ----------
        nsamp : int
            Number of samples from predictive distribution.
        input_list : list
            A list of numpy ndarray shape=(self.params.ndimx, ).
        lv : Namespace
            Namespace for posterior sample.

        Returns
        -------
        list
            A list of len=len(input_list) of numpy ndarrays shape=(nsamp, 1).
        """
        x_pred = np.stack(input_list)
        if lv is None:
            # Choose a hyperparameter sample: the single point estimate for
            # opt-style models, or a random MCMC draw for samp-style models.
            if (
                self.params.model_str == 'optfixedsig'
                or self.params.model_str == 'opt'
                or self.params.model_str == 'fixedparam'
            ):
                lv = self.sample_list[0]
            elif (
                self.params.model_str == 'samp'
                or self.params.model_str == 'sampfixedsig'
            ):
                lv = self.sample_list[np.random.randint(len(self.sample_list))]
        postmu, postcov = gp_post(
            self.data.x,
            self.data.y,
            x_pred,
            lv.ls,
            lv.alpha,
            lv.sigma,
            self.params.kernel,
        )
        # One joint draw from the posterior over the prediction points...
        single_post_sample = sample_mvn(postmu, postcov, 1).reshape(-1)
        # ...duplicated nsamp times (a single Thompson sample, repeated).
        pred_list = [
            single_post_sample for _ in range(nsamp)
        ]  #### TODO: instead of duplicating this TS, sample nsamp times from generative process (given/conditioned-on this TS)
        return list(np.stack(pred_list).T)
    def sample_gp_post_pred(self, nsamp, input_list, full_cov=False, nloop=None):
        """
        Sample from GP posterior predictive distribution.
        Parameters
        ----------
        nsamp : int
            Number of samples from posterior predictive distribution.
        input_list : list
            A list of numpy ndarray shape=(self.params.ndimx, ).
        full_cov : bool
            If True, return covariance matrix, else return diagonal only.
        nloop : int or None
            For sampling models, number of hyperparameter draws to loop over;
            nsamp is then split evenly across the draws. Ignored for
            point-estimate models.
        Returns
        -------
        list
            A list of len=len(input_list) of numpy ndarrays shape=(nsamp, 1).
        """
        # Point-estimate models: a single hyperparameter setting, one loop pass.
        if (
            self.params.model_str == 'optfixedsig'
            or self.params.model_str == 'opt'
            or self.params.model_str == 'fixedparam'
        ):
            nloop = 1
            sampids = [0]
        elif self.params.model_str == 'samp' or self.params.model_str == 'sampfixedsig':
            if nloop is None:
                nloop = nsamp
            # Split nsamp over the hyperparameter draws; note the truncation
            # means nloop * (nsamp // nloop) total samples are returned.
            nsamp = int(nsamp / nloop)
            sampids = np.random.randint(len(self.sample_list), size=(nloop,))
        ppred_list = []
        for i in range(nloop):
            samp = self.sample_list[sampids[i]]
            # Posterior at the prediction points under this hyperparameter draw.
            postmu, postcov = gp_post(
                self.data.x,
                self.data.y,
                np.stack(input_list),
                samp.ls,
                samp.alpha,
                samp.sigma,
                self.params.kernel,
                full_cov,
            )
            if full_cov:
                ppred_list.extend(list(sample_mvn(postmu, postcov, nsamp)))
            else:
                # Diagonal case: postcov holds per-point variances/stds
                # (shape matching postmu), sampled independently.
                ppred_list.extend(
                    list(
                        np.random.normal(
                            postmu.reshape(-1,),
                            postcov.reshape(-1,),
                            size=(nsamp, len(input_list)),
                        )
                    )
                )
        # Transpose so the outer list indexes prediction points.
        return list(np.stack(ppred_list).T)
    def get_gp_prior_mu_cov(self, x_list, full_cov=True):
        """
        Return GP prior parameters: mean (mu) and covariance (cov).
        Parameters
        ----------
        x_list : list
            List of numpy ndarrays, each representing a domain point.
        full_cov : bool
            If True, return covariance matrix. If False, return list of standard
            deviations.
        Returns
        -------
        mu : ndarray
            A numpy 1d ndarray with len=len(x_list) of floats, corresponding to
            posterior mean for each x in x_list.
        cov : ndarray
            If full_cov is False, return a numpy 1d ndarray with len=len(x_list) of
            floats, corresponding to posterior standard deviations for each x in x_list.
            If full_cov is True, return the covariance matrix as a numpy ndarray
            (len(x_list) x len(x_list)).
        """
        # NOTE: currently assumes zero-mean prior.
        # TODO: generalized beyond zero-mean prior.
        # TODO: set prior sample
        # BUG(review): prior_sample is None, so the kernel call below raises
        # AttributeError on prior_sample.ls. A hyperparameter namespace
        # (with .ls and .alpha) must be supplied here before this method works.
        prior_sample = None
        mu = np.zeros(len(x_list))
        cov = self.params.kernel(x_list, x_list, prior_sample.ls, prior_sample.alpha)
        if full_cov is False:
            # Reduce the full covariance to per-point standard deviations.
            cov = np.sqrt(np.diag(cov))
        return mu, cov
def get_gp_post_mu_cov(self, x_list, full_cov=True):
"""
Return GP posterior parameters: mean (mu) and covariance (cov). If there is no
data, return the GP prior parameters.
Parameters
----------
x_list : list
List of numpy ndarrays, each representing a domain point.
full_cov : bool
If True, return covariance matrix. If False, return list of standard
deviations.
Returns
-------
mu : ndarray
A numpy 1d ndarray with len=len(x_list) of floats, corresponding to
posterior mean for each x in x_list.
cov : ndarray
If full_cov is False, return a numpy 1d ndarray with len=len(x_list) of
floats, corresponding to posterior standard deviations for each x in x_list.
If full_cov is True, return the covariance matrix as a numpy ndarray
(len(x_list) x len(x_list)).
"""
if len(self.data.x) == 0:
return self.get_gp_prior_mu_cov(x_list, full_cov)
# If data is not empty:
# Choose final sample
final_sample = self.sample_list[-1]
mu, cov = gp_post(
self.data.x,
self.data.y,
x_list,
final_sample.ls,
final_sample.alpha,
final_sample.sigma,
self.params.kernel,
full_cov=full_cov,
)
return mu, cov
def get_gp_post_mu_cov_single(self, x):
"""Get GP posterior for an input x. Return posterior mean and std for x."""
mu_arr, std_arr = self.get_gp_post_mu_cov([x], full_cov=False)
return mu_arr[0], std_arr[0]
def sample_gp_prior(self, x_list, n_samp, full_cov=True):
"""Get samples from gp prior for each input in x_list."""
mu, cov = self.get_gp_prior_mu_cov(x_list, full_cov)
return self.get_normal_samples(mu, cov, n_samp, full_cov)
def sample_gp_post(self, x_list, n_samp, full_cov=True):
"""Get samples from gp posterior for each input in x_list."""
if len(self.data.x) == 0:
return self.sample_gp_prior(x_list, n_samp, full_cov)
# If data is not empty:
mu, cov = self.get_gp_post_mu_cov(x_list, full_cov)
return self.get_normal_samples(mu, cov, n_samp, full_cov)
def get_normal_samples(self, mu, cov, n_samp, full_cov):
"""Return normal samples."""
if full_cov:
sample_list = list(sample_mvn(mu, cov, n_samp))
else:
sample_list = list(
np.random.normal(
mu.reshape(-1,), cov.reshape(-1,), size=(n_samp, len(mu))
)
)
x_list_sample_list = list(np.stack(sample_list).T)
return x_list_sample_list
def print_str(self):
"""Print a description string."""
print('*StanGpMatern with params={}'.format(self.params))
| 35.823077 | 127 | 0.544127 |
0e830441d9c25e71605d6a476b796a10c6ebb0c9 | 223 | py | Python | src/cdf/__init__.py | ana-balica/classy-django-forms | fd44f365cd5da27ef7087977dd30f45501ecd338 | [
"MIT"
] | 23 | 2017-08-20T05:31:16.000Z | 2022-01-26T13:43:20.000Z | src/cdf/__init__.py | ana-balica/classy-django-forms | fd44f365cd5da27ef7087977dd30f45501ecd338 | [
"MIT"
] | null | null | null | src/cdf/__init__.py | ana-balica/classy-django-forms | fd44f365cd5da27ef7087977dd30f45501ecd338 | [
"MIT"
] | 1 | 2020-01-04T14:58:26.000Z | 2020-01-04T14:58:26.000Z | import django
from django.conf import settings
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
)
django.setup()
| 15.928571 | 51 | 0.55157 |
327a8b147d9b80ee67753b73bd6e6ed731138beb | 418 | py | Python | UpdateDependencies.py | MarcelPiNacy/Zero | 4d5e01c01ffa171fe5db2b641eec7bca03ccea00 | [
"Apache-2.0"
] | null | null | null | UpdateDependencies.py | MarcelPiNacy/Zero | 4d5e01c01ffa171fe5db2b641eec7bca03ccea00 | [
"Apache-2.0"
] | null | null | null | UpdateDependencies.py | MarcelPiNacy/Zero | 4d5e01c01ffa171fe5db2b641eec7bca03ccea00 | [
"Apache-2.0"
] | null | null | null | import os
import pathlib
import json
wd = str(pathlib.Path().cwd())
file = open("dependencies.json")
d = json.load(file)
for name, url in d["dependencies"] :
print("--- Updating " + name + " ---")
if os.path.isdir(wd + "/dependencies/" + name + "/"):
os.system("git -C " + wd + "/dependencies/" + name + " pull origin")
else:
os.system("git -C " + wd + "/dependencies/" + " clone " + url)
| 29.857143 | 76 | 0.569378 |
b9ffd1cd9d8d96f4b3604983d57c44df65361616 | 3,290 | py | Python | scripts/update_dreqs/update_dreqs_0327.py | jonseddon/primavera-dmt | 1239044e37f070b925a3d06db68351f285df780c | [
"BSD-3-Clause"
] | null | null | null | scripts/update_dreqs/update_dreqs_0327.py | jonseddon/primavera-dmt | 1239044e37f070b925a3d06db68351f285df780c | [
"BSD-3-Clause"
] | 49 | 2018-11-14T17:00:03.000Z | 2021-12-20T11:04:22.000Z | scripts/update_dreqs/update_dreqs_0327.py | jonseddon/primavera-dmt | 1239044e37f070b925a3d06db68351f285df780c | [
"BSD-3-Clause"
] | 2 | 2018-07-04T10:58:43.000Z | 2018-09-29T14:55:08.000Z | #!/usr/bin/env python
"""
update_dreqs_0327.py
Remove from the DMT the CMCC tasmax and tasmin variables from all experiments
(there should be 40 datasets in total) as per
https://errata.es-doc.org/static/view.html?uid=9b40a054-21a7-5ae7-a3eb-8c373c5adddc
"""
import argparse
import logging.config
import sys
import django
django.setup()
from django.contrib.auth.models import User # nopep8
from pdata_app.utils.common import delete_files # nopep8
from pdata_app.utils.replace_file import replace_files # nopep8
from pdata_app.models import DataIssue, DataRequest, Settings # nopep8
__version__ = '0.1.0b1'
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
# The top-level directory to write output data to
BASE_OUTPUT_DIR = Settings.get_solo().base_output_dir
def parse_args():
    """
    Parse command-line arguments
    """
    parser = argparse.ArgumentParser(
        description='Add additional data requests')
    parser.add_argument(
        '-l', '--log-level',
        help='set logging level (default: %(default)s)',
        choices=['debug', 'info', 'warning', 'error'],
        default='info')
    parser.add_argument(
        '--version', action='version',
        version='%(prog)s {}'.format(__version__))
    return parser.parse_args()
def main():
    """
    Main entry point.

    Withdraw the erroneous CMCC tasmax/tasmin datasets: record a DataIssue
    against every affected file, delete the files from disk and mark them as
    replaced in the DMT.
    """
    dreqs = DataRequest.objects.filter(
        institute__short_name='CMCC',
        variable_request__cmor_name__in=['tasmax', 'tasmin'],
        datafile__isnull=False
    ).distinct()
    num_dreqs = dreqs.count()
    expected_dreqs = 40
    if num_dreqs != expected_dreqs:
        # Abort rather than withdraw the wrong set of datasets.
        logger.error(f'Found {num_dreqs} but was expecting {expected_dreqs}.')
        sys.exit(1)
    daniele = User.objects.get(username='dpeano')
    # Fixed the issue text: adjacent literals previously produced "Pleasesee"
    # (missing space between "Please" and "see").
    long_txt = (
        "CMCC tasmax and tasmin contain errors and must be withdrawn. Please "
        "see https://errata.es-doc.org/static/view.html?uid=9b40a054-21a7-5ae7"
        "-a3eb-8c373c5adddc."
    )
    tas_issue, _created = DataIssue.objects.get_or_create(issue=long_txt,
                                                          reporter=daniele)
    for dreq in dreqs:
        logger.info(dreq)
        # Tag every file with the issue before removing and replacing it.
        tas_issue.data_file.add(*dreq.datafile_set.all())
        delete_files(dreq.datafile_set.all(), BASE_OUTPUT_DIR, skip_badc=True)
        replace_files(dreq.datafile_set.all())
if __name__ == "__main__":
    cmd_args = parse_args()
    # determine the log level
    log_level = getattr(logging, cmd_args.log_level.upper())
    # configure the logger
    # Single console handler on the root logger so all module loggers inherit it.
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': DEFAULT_LOG_FORMAT,
            },
        },
        'handlers': {
            'default': {
                'level': log_level,
                'class': 'logging.StreamHandler',
                'formatter': 'standard'
            },
        },
        'loggers': {
            '': {
                'handlers': ['default'],
                'level': log_level,
                'propagate': True
            }
        }
    })
    # run the code
    main()
| 29.375 | 83 | 0.597568 |
d730293ba7127f680f41e73602fbb18d5e5060e2 | 1,958 | py | Python | venv/Lib/site-packages/pyrogram/raw/types/base_theme_day.py | iamgeorgiy/heroku-userbot | 5a92417d16f8ead949d88cb38da213fc2da5d3a4 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/types/base_theme_day.py | iamgeorgiy/heroku-userbot | 5a92417d16f8ead949d88cb38da213fc2da5d3a4 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/types/base_theme_day.py | iamgeorgiy/heroku-userbot | 5a92417d16f8ead949d88cb38da213fc2da5d3a4 | [
"Apache-2.0"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class BaseThemeDay(TLObject):  # type: ignore
    """This object is a constructor of the base type :obj:`~pyrogram.raw.base.BaseTheme`.
    Details:
        - Layer: ``117``
        - ID: ``0xfbd81688``
    **No parameters required.**
    """
    __slots__: List[str] = []
    ID = 0xfbd81688
    QUALNAME = "types.BaseThemeDay"
    def __init__(self) -> None:
        pass
    @staticmethod
    def read(data: BytesIO, *args: Any) -> "BaseThemeDay":
        # No flags
        # Parameterless constructor: nothing to deserialize beyond the ID,
        # which the caller has already consumed.
        return BaseThemeDay()
    def write(self) -> bytes:
        data = BytesIO()
        # TL serialization: just the 32-bit constructor ID, no fields.
        data.write(Int(self.ID, False))
        # No flags
        return data.getvalue()
f74e08e4c8d1d1fe496413d1edcd2c47c6720d19 | 1,230 | py | Python | cvrf2csaf/section_handlers/document_publisher.py | sthagen/csaf-tools-CVRF-CSAF-Converter | bc458eb1a71cf12c5a7c45223b56ab233819bdbd | [
"Apache-2.0",
"MIT"
] | 9 | 2021-12-07T07:52:28.000Z | 2022-03-23T10:26:25.000Z | cvrf2csaf/section_handlers/document_publisher.py | sthagen/csaf-tools-CVRF-CSAF-Converter | bc458eb1a71cf12c5a7c45223b56ab233819bdbd | [
"Apache-2.0",
"MIT"
] | 71 | 2021-12-03T09:40:56.000Z | 2022-03-29T21:47:38.000Z | cvrf2csaf/section_handlers/document_publisher.py | sthagen/csaf-tools-CVRF-CSAF-Converter | bc458eb1a71cf12c5a7c45223b56ab233819bdbd | [
"Apache-2.0",
"MIT"
] | 3 | 2021-12-07T07:52:32.000Z | 2022-02-17T09:55:00.000Z | """ Module containing DocumentPublisher class """
from ..common.common import SectionHandler
# pylint: disable=too-few-public-methods
class DocumentPublisher(SectionHandler):
""" Responsible for converting the DocumentPublisher section:
- /cvrf:cvrfdoc/cvrf:DocumentPublisher
"""
type_category_mapping = {
'Vendor': 'vendor',
'Coordinator': 'coordinator',
'User': 'user',
'Discoverer': 'discoverer',
'Other': 'other',
}
def __init__(self, config):
super().__init__()
self.name = config.get('publisher_name')
self.namespace = config.get('publisher_namespace')
def _process_mandatory_elements(self, root_element):
self.csaf['name'] = self.name
self.csaf['namespace'] = self.namespace
self.csaf['category'] = self.type_category_mapping[root_element.attrib['Type']]
def _process_optional_elements(self, root_element):
# optional values
if hasattr(root_element, 'ContactDetails'):
self.csaf['contact_details'] = root_element.ContactDetails.text
if hasattr(root_element, 'IssuingAuthority'):
self.csaf['issuing_authority'] = root_element.IssuingAuthority.text
| 36.176471 | 87 | 0.669919 |
cb9b2a22ce2ce8fb200db65114f83c6c7f74f6bd | 3,019 | py | Python | programaestudiante.py | josepive/bigdataclass | e432043eba23fef3e9a02df8a3a42a0a331a88e4 | [
"Apache-2.0"
] | null | null | null | programaestudiante.py | josepive/bigdataclass | e432043eba23fef3e9a02df8a3a42a0a331a88e4 | [
"Apache-2.0"
] | null | null | null | programaestudiante.py | josepive/bigdataclass | e432043eba23fef3e9a02df8a3a42a0a331a88e4 | [
"Apache-2.0"
] | null | null | null | from pyspark.sql import SparkSession
from pyspark.sql.types import (DateType, IntegerType, FloatType,
StringType, StructField, StructType, TimestampType)
from pyspark.sql.functions import sum, round, when, count
def join_dataframes(estudiante_df, curso_df, nota_df):
    """Join students, grades and courses into one frame sorted by
    (Carnet, Curso) with columns Carnet, Nombre, Carrera, Curso, Nota, Creditos."""
    estudiante_nota_df = estudiante_df.join(nota_df, on='Carnet')
    # The course frame's Carrera column is dropped; it is already present
    # in estudiante_nota_df via the student frame.
    curso_1_df = curso_df.select('Curso', 'Creditos')
    estudiante_nota_curso_df = estudiante_nota_df.join(curso_1_df, on='Curso')
    joint_df = estudiante_nota_curso_df.select(
        'Carnet', 'Nombre', 'Carrera', 'Curso', 'Nota', 'Creditos').sort('Carnet', 'Curso')
    return joint_df
def aggregate_dataframe(joint_df):
    """Compute each student's credit-weighted grade average ('Promedio').

    The denominator guards against division by zero: when a student's total
    credits sum to 0, the row count is used instead of the credit sum.
    Returns one row per student: Carnet, Nombre, Carrera, Promedio (rounded
    to 2 decimals), sorted by Carnet.
    """
    joint_1_df = joint_df.select('Carnet', 'Nota', 'Creditos')
    weighted_average_df = joint_1_df.groupBy('Carnet').agg(round(sum(joint_df.Nota * joint_df.Creditos) / (
        when(sum(joint_df.Creditos) == 0, count(joint_df.Creditos))).otherwise(sum(joint_df.Creditos)), 2).alias('Promedio'))
    aggregated_df = joint_df.select('Carnet', 'Nombre', 'Carrera').distinct().join(
        weighted_average_df, 'Carnet').sort('Carnet')
    return aggregated_df
def top_dataframe(aggregated_df):
    """Return students ordered by weighted average ('Promedio'), best first.

    The original chained ``.sort('Carrera').sort('Promedio', ascending=False)``;
    in Spark a subsequent ``sort`` fully re-orders the frame, so the first sort
    was dead work and has been removed. Tie order among equal averages is
    unspecified, exactly as before.
    """
    return aggregated_df.sort('Promedio', ascending=False)
if __name__ == '__main__':
    import sys
    spark = SparkSession.builder.appName('Mejores Promedios').getOrCreate()
    # Student file (CSV, no header): Carnet, Nombre, Carrera
    estudiante_schema = StructType([StructField('Carnet', IntegerType()),
                                    StructField('Nombre', StringType()),
                                    StructField('Carrera', StringType())])
    estudiante_df = spark.read.csv(sys.argv[1],
                                   schema=estudiante_schema,
                                   header=False)
    estudiante_df.show()
    # Course file (CSV, no header): Curso, Creditos, Carrera
    curso_schema = StructType([StructField('Curso', IntegerType()),
                               StructField('Creditos', IntegerType()),
                               StructField('Carrera', StringType())])
    curso_df = spark.read.csv(sys.argv[2],
                              schema=curso_schema,
                              header=False)
    curso_df.show()
    # Grades file (CSV, no header): Carnet, Curso, Nota
    nota_schema = StructType([StructField('Carnet', IntegerType()),
                              StructField('Curso', IntegerType()),
                              StructField('Nota', FloatType())])
    nota_df = spark.read.csv(sys.argv[3],
                             schema=nota_schema,
                             header=False)
    nota_df.show()
    # Best averages: join, aggregate per student, then rank by average.
    joint_df = join_dataframes(estudiante_df, curso_df, nota_df)
    joint_df.show()
    aggregated_df = aggregate_dataframe(joint_df)
    aggregated_df.show()
    top_df = top_dataframe(aggregated_df)
    top_df.show()
| 33.175824 | 126 | 0.600199 |
3e858a880ed3cf2e17c514e41cd47a4f0ccbe43f | 912 | py | Python | backend/searchevents/migrations/0001_initial.py | buubblegun/berry | 5d0e27d39b618a7161b53e5bb735dab570faf14b | [
"MIT"
] | null | null | null | backend/searchevents/migrations/0001_initial.py | buubblegun/berry | 5d0e27d39b618a7161b53e5bb735dab570faf14b | [
"MIT"
] | null | null | null | backend/searchevents/migrations/0001_initial.py | buubblegun/berry | 5d0e27d39b618a7161b53e5bb735dab570faf14b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-02 00:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Event table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('eventName', models.CharField(max_length=200)),
                ('eventLocation', models.CharField(max_length=200)),
                ('eventDate', models.DateTimeField(verbose_name='date published')),
                ('eventType', models.CharField(max_length=200)),
                ('eventDescription', models.CharField(max_length=2000)),
                ('eventLink', models.CharField(max_length=2000)),
            ],
        ),
    ]
| 31.448276 | 114 | 0.596491 |
b223f8f65b5c9fdb4dc325cdf92d113711f56c59 | 3,008 | py | Python | tests/test_jao.py | Open-Dataplatform/osiris-egress-api | e4f759b835ec06b129bb92409c06195c0cfb2492 | [
"MIT"
] | 1 | 2021-06-16T14:17:41.000Z | 2021-06-16T14:17:41.000Z | tests/test_jao.py | Open-Dataplatform/osiris-egress-api | e4f759b835ec06b129bb92409c06195c0cfb2492 | [
"MIT"
] | 5 | 2021-06-07T20:39:03.000Z | 2022-02-16T14:27:48.000Z | tests/test_jao.py | Open-Dataplatform/osiris-egress-api | e4f759b835ec06b129bb92409c06195c0cfb2492 | [
"MIT"
] | 1 | 2021-03-29T08:17:50.000Z | 2021-03-29T08:17:50.000Z | from http import HTTPStatus
from io import BytesIO
import pandas as pd
from tests.conftest import client
from tests.conftest import ACCESS_TOKEN
def test_download_jao(mocker):
    """Exercise /v1/jao for valid horizons, a missing-date request and an
    unsupported horizon, with the downstream download patched out."""
    import app.routers.jao
    download_parquet_data = mocker.patch('app.routers.jao.download_parquet_data_v1')
    download_parquet_data.return_value = ([{'data': 'data'}], HTTPStatus.OK)
    get_guid_config = mocker.patch('app.routers.jao.get_guid_config')
    get_guid_config.return_value = 'INDEX', 'HORIZON'
    app.routers.jao.config = {'JAO': {'yearly_guid': 'yearly_guid_1234',
                                      'monthly_guid': 'monthly_guid_1234'}}
    # YEARLY horizon should resolve to the yearly guid.
    response = client.get(
        '/v1/jao',
        headers={'Authorization': ACCESS_TOKEN},
        params={'horizon': 'YEARLY', 'from_date': '2021-01-01', 'to_date': '2021-01-02'}
    )
    assert response.status_code == HTTPStatus.OK
    assert download_parquet_data.called
    assert download_parquet_data.await_args.args == ('yearly_guid_1234', ACCESS_TOKEN, 'INDEX', 'HORIZON', '2021-01-01', '2021-01-02')
    assert response.json() == [{'data': 'data'}]
    # MONTHLY horizon should resolve to the monthly guid.
    response = client.get(
        '/v1/jao',
        headers={'Authorization': ACCESS_TOKEN},
        params={'horizon': 'MONTHLY', 'from_date': '2021-01-01', 'to_date': '2021-01-02'}
    )
    assert response.status_code == HTTPStatus.OK
    assert download_parquet_data.called
    assert download_parquet_data.await_args.args == ('monthly_guid_1234', ACCESS_TOKEN, 'INDEX', 'HORIZON', '2021-01-01', '2021-01-02')
    assert response.json() == [{'data': 'data'}]
    # Missing from_date/to_date: request validation rejects it.
    response = client.get(
        '/v1/jao',
        headers={'Authorization': ACCESS_TOKEN},
        params={'horizon': 'MONTHLY'}
    )
    assert response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY
    # Unsupported horizon value: endpoint raises a ValueError -> 400.
    response = client.get(
        '/v1/jao',
        headers={'Authorization': ACCESS_TOKEN},
        params={'horizon': 'DAILY', 'from_date': '2021-01-01', 'to_date': '2021-01-02'}
    )
    assert response.status_code == HTTPStatus.BAD_REQUEST
    assert response.json()['detail'] == '(ValueError) The horizon value can only be Yearly or Monthly.'
def test_download_jao_eds(mocker):
    """Exercise /v1/jao_eds: a parquet blob built in-memory is served back
    as JSON rows, with the blob download patched out."""
    import app.routers.jao
    test_df = pd.DataFrame({'jao': [1, 2], 'eds': [2, 3]})
    # Serialize the frame to an in-memory parquet stream for the mock to return.
    bytes_io_file = BytesIO()
    test_df.to_parquet(bytes_io_file, engine='pyarrow', compression='snappy')
    bytes_io_file.seek(0)
    download_blob_to_stream = mocker.patch('app.routers.jao.download_blob_to_stream')
    download_blob_to_stream.return_value = bytes_io_file
    app.routers.jao.config = {'JAO EDS': {'guid': 'jao_eds_guid'}}
    response = client.get(
        '/v1/jao_eds/2021/04/D1-DE',
        headers={'Authorization': ACCESS_TOKEN},
    )
    assert response.status_code == HTTPStatus.OK
    assert download_blob_to_stream.called
    # Path segments are mapped onto the blob layout guid/year=YYYY/month=MM/NAME.parquet.
    assert download_blob_to_stream.await_args.args == ('jao_eds_guid/year=2021/month=04/D1-DE.parquet', ACCESS_TOKEN)
    assert response.json() == [{'jao': 1, 'eds': 2}, {'jao': 2, 'eds': 3}]
| 38.075949 | 135 | 0.668883 |
e8cb45198216b54a6b423939b444b9436a97e6f2 | 496 | py | Python | 1342.number-of-steps-to-reduce-a-number-to-zero.py | dely2p/Leetcode | 2463f512d9051707df9864dffe7f3f3c12d18eab | [
"MIT"
] | 1 | 2020-07-08T05:13:05.000Z | 2020-07-08T05:13:05.000Z | 1342.number-of-steps-to-reduce-a-number-to-zero.py | dely2p/Leetcode | 2463f512d9051707df9864dffe7f3f3c12d18eab | [
"MIT"
] | null | null | null | 1342.number-of-steps-to-reduce-a-number-to-zero.py | dely2p/Leetcode | 2463f512d9051707df9864dffe7f3f3c12d18eab | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=1342 lang=python3
#
# [1342] Number of Steps to Reduce a Number to Zero
#
# @lc code=start
class Solution:
    def numberOfSteps(self, num: int) -> int:
        """Return the number of steps needed to reduce ``num`` to zero.

        Each step halves an even number or subtracts one from an odd number.

        Fixes over the original: uses integer floor division (``//``) instead
        of true division (``/``), which silently turned ``num`` into a float,
        and removes a leftover debug ``print``.
        """
        steps = 0
        while num > 0:
            if num % 2 == 0:
                num //= 2
            else:
                num -= 1
            steps += 1
        return steps
# @lc code=end
| 19.84 | 51 | 0.469758 |
6b6add87e384994b77b69c941bb7bcfa57572b2d | 663 | py | Python | airflow/dags/my_dag_example.py | zkan/try-airflow | d2f008ce2e19983e88e77b46534824c214ad0399 | [
"MIT"
] | 1 | 2017-01-15T13:53:01.000Z | 2017-01-15T13:53:01.000Z | airflow/dags/my_dag_example.py | zkan/try-airflow | d2f008ce2e19983e88e77b46534824c214ad0399 | [
"MIT"
] | 10 | 2017-02-15T07:31:08.000Z | 2022-03-29T22:28:10.000Z | airflow/dags/my_dag_example.py | zkan/try-airflow | d2f008ce2e19983e88e77b46534824c214ad0399 | [
"MIT"
] | 1 | 2017-02-15T07:03:16.000Z | 2017-02-15T07:03:16.000Z | from datetime import datetime
from airflow import DAG
from airflow.operators.bash import BashOperator
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2017, 5, 8),
'email': ['zkan@hey.com']
}
dag = DAG(
'my_dag_example',
default_args=default_args,
schedule_interval='*/30 * * * *',
catchup=False,
tags=['odds',],
)
t1 = BashOperator(
task_id='print_date',
bash_command='date',
dag=dag
)
t2 = BashOperator(
task_id='sleep',
bash_command='sleep 5',
dag=dag
)
t3 = BashOperator(
task_id='hello',
bash_command='echo "hello"',
dag=dag
)
t1 >> t2 >> t3
| 16.575 | 47 | 0.627451 |
d37f6b6f65b2da4d2acf77cc286e32c51ffa165a | 314 | py | Python | benchmark/python-cgi/handler.py | rrramiro/dinosaur | 3fd147f204bae609717d7a2093a161849dc7e31a | [
"WTFPL"
] | 40 | 2017-04-27T00:07:16.000Z | 2019-08-20T00:54:52.000Z | benchmark/python-cgi/handler.py | rrramiro/dinosaur | 3fd147f204bae609717d7a2093a161849dc7e31a | [
"WTFPL"
] | null | null | null | benchmark/python-cgi/handler.py | rrramiro/dinosaur | 3fd147f204bae609717d7a2093a161849dc7e31a | [
"WTFPL"
] | 1 | 2019-10-22T12:28:13.000Z | 2019-10-22T12:28:13.000Z | #!/usr/bin/env python3
import os, cgi, sys
# Minimal CGI handler: echoes the parsed form fields and the PATH_INFO
# components back to the client as text/html.
# NOTE(review): the cgi module is deprecated since Python 3.11 and removed
# in 3.13 — confirm the target interpreter before reuse.
if __name__ == "__main__":
    fields = cgi.FieldStorage()
    path_info = os.environ.get("PATH_INFO","")
    path_components = path_info.split("/")
    # CGI requires the header block terminated by a blank line before the body.
    sys.stdout.write("Content-type: text/html\r\n\r\n")
    print("hello\n")
    print(fields)
    print(path_components)
1e8098986583366a43db7aacfcd34400fd4a36af | 2,042 | py | Python | metaconfigure/collect_subprojects.py | njoy/pdqsort-adapter | 4da5dec7cc809a1b97c40a1fac4202565c08de7e | [
"Zlib"
] | null | null | null | metaconfigure/collect_subprojects.py | njoy/pdqsort-adapter | 4da5dec7cc809a1b97c40a1fac4202565c08de7e | [
"Zlib"
] | 3 | 2019-10-29T22:49:18.000Z | 2019-11-13T16:10:03.000Z | metaconfigure/collect_subprojects.py | njoy/knoop | 411e624a54f03969081b5b4dfa257ade5d979362 | [
"FSFAP"
] | null | null | null | #! /usr/bin/env python
"""
A script to generate a flat subproject directory from a tree of dependency directories
"""
import os
import subprocess
import textwrap
import json
import shutil
import sys
import warnings
def project_name():
    """Name of the current project, i.e. the last component of the working directory."""
    cwd = os.getcwd()
    return os.path.split(cwd)[1]
def dependency_directory():
    """
    A function isolating the path specification to the project's dependency directory
    """
    root = os.getcwd()
    return os.path.join(root, "dependencies")
def traverse_dependencies( destination, traversed ):
    """
    collect unique links to dependency projects in a destination folder

    Recursively walks each project's ``dependencies`` directory, symlinking
    every not-yet-seen dependency into ``destination`` (falling back to a
    copy when symlinks are unavailable). Relies on os.chdir side effects:
    the cwd is the current project on entry and is restored before return.
    """
    if not os.path.isdir( dependency_directory() ):
        return
    os.chdir( dependency_directory() )
    for dependency in os.listdir( os.getcwd() ) :
        if os.path.isdir( dependency ) and not dependency in traversed :
            # 'traversed' de-duplicates dependencies shared between projects.
            traversed.add( dependency )
            os.chdir( dependency )
            if not os.path.isdir( os.path.join( destination, dependency ) ):
                try:
                    os.symlink( os.getcwd(),
                               os.path.join( destination, dependency ) )
                except OSError:
                    # e.g. Windows without symlink privilege — copy instead,
                    # skipping nested 'dependencies' trees.
                    warnings.warn( "Could not create symbolic "
                                   "link from {} to subprojects directory."\
                                   .format( os.getcwd() ) )
                    warnings.warn( "Copying directory contents instead" )
                    shutil.copytree( os.getcwd(),
                                     os.path.join( destination, dependency ),
                                     ignore = shutil.ignore_patterns("dependencies") )
            # Recurse into this dependency's own dependencies.
            traverse_dependencies( destination, traversed )
            os.chdir( ".." )
    # Step back out of the 'dependencies' directory to the project root.
    os.chdir( os.path.join( ".." ) )
def collect_subprojects():
    """Populate ./subprojects with links to every transitive dependency."""
    subprojects_dir = os.path.join(os.getcwd(), "subprojects")
    if not os.path.isdir(subprojects_dir):
        os.makedirs(subprojects_dir)
    traverse_dependencies(subprojects_dir, set())
collect_subprojects()
| 32.935484 | 86 | 0.580803 |
1826a01666f56b2cf58fb06fc36fed38c3902d6f | 4,420 | py | Python | css/image-fit/reftests/img-jpg-wide/build.py | gsnedders/presto-testo | a0acfbef13a3f8cae67cc7145216d31b67aa8eb4 | [
"BSD-3-Clause"
] | null | null | null | css/image-fit/reftests/img-jpg-wide/build.py | gsnedders/presto-testo | a0acfbef13a3f8cae67cc7145216d31b67aa8eb4 | [
"BSD-3-Clause"
] | null | null | null | css/image-fit/reftests/img-jpg-wide/build.py | gsnedders/presto-testo | a0acfbef13a3f8cae67cc7145216d31b67aa8eb4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
import sys
import os
sys.path.insert(0, os.path.abspath("../../include/"))
import allpairs
imgfilename = 'wide.jpg'
imgwidth = 240.0
imgheight = 128.0
test_template = """<!doctype html>
<!-- This file is generated by build.py. -->
<title>img %s; %s</title>
<link rel="stylesheet" href="../../support/reftests.css">
<style>
#test > * { %s }
</style>
<div id="test">
<img src="../../support/%s">
</div>
"""
ref_template = """<!doctype html>
<!-- This file is generated by build.py. -->
<title>Reference for img %s; %s</title>
<link rel="stylesheet" href="../../support/reftests.css">
<style>
.helper { overflow:%s }
.helper > * { %s }
</style>
<div id="ref">
<span class="helper"><img src="../../support/%s"></span>
</div>
"""
reftest_list = ''
ref_hashes = {}
# Generate one test/reference HTML pair for every (overflow, fit, x, y)
# combination produced by allpairs, plus a reftest.list manifest.
# NOTE(review): uses dict.iteritems — this script targets Python 2.
for overflow,fit,x,y in allpairs.tests:
    refx = refy = ''
    testx = x
    testy = y
    # xx/yy strip a trailing '%' so percentages stay filename-safe.
    xx = x
    if x.find('%') != -1:
        xx = x[:-1]
    yy = y
    if y.find('%') != -1:
        yy = y[:-1]
    # reference dimensions
    if fit == 'none':
        refdims = ''
        centerx = 100 - (imgwidth/2)
        centery = 100 - (imgheight/2)
    elif fit == 'fill' or fit == 'auto':
        refdims = 'width:200px; height:200px'
        centerx = 0.0
        centery = 0.0
    elif fit == 'cover':
        refdims = 'height:200px'
        centerx = 100 - ((imgwidth * 200 / imgheight)/2)
        centery = 0.0
    elif fit == 'contain':
        refdims = 'width:200px'
        centerx = 0.0
        centery = 100 - ((imgheight * 200 / imgwidth)/2)
    centerx = 'left:'+str(centerx)+'px'
    centery = 'top:'+str(centery)+'px'
    # reference position
    invalid = False
    # invalid cases use center center
    if ((xx != x or x in ('1em', '30px', '2cm')) and (y in ('left', 'right')) or
        (yy != y or y in ('1em', '30px', '2cm')) and (x in ('top', 'bottom')) or
        (x == 'top' and y == 'bottom') or (x == 'bottom' and y == 'top') or
        (x == 'left' and y == 'right') or (x == 'right' and y == 'left') or
        x == y == 'left' or x == y == 'right' or x == y == 'top' or x == y == 'bottom'):
        refx = centerx
        refy = centery
        invalid = True
    # valid cases
    else:
        # normalize the order
        if (x in ('top', 'center', 'bottom') and y in ('left' , 'center', 'right')):
            x, y = y, x
        # x
        # center
        if x == '50%' or x == 'center' or x == '':
            refx = centerx
        # left
        elif x == '0%' or x == 'left':
            refx = 'left:0'
        # right
        elif x == '100%' or x == 'right':
            refx = 'right:0'
        # lengths
        elif x == '1em' or x == '30px' or x == '2cm':
            refx = 'left:'+x
        # y
        # center
        if y == '50%' or y == 'center':
            refy = centery
        # top
        elif y == '0%' or y == 'top':
            refy = 'top:0'
        # bottom
        elif y == '100%' or y == 'bottom':
            refy = 'bottom:0'
        # lengths
        elif y == '1em' or y == '30px' or y == '2cm':
            refy = 'top:'+y
        # single keyword
        elif y == '':
            # y value in x
            if x == 'top':
                refx = centerx
                refy = 'top:0'
            elif x == 'bottom':
                refx = centerx
                refy = 'bottom:0'
            # x value in x
            else:
                refy = centery
    # Write the test page with the computed object-fit/position style.
    test_filename = "%s_%s_%s_%s.html" % (overflow, fit, xx, yy)
    style = "overflow:%s; -o-object-fit:%s; -o-object-position:%s %s" % (overflow, fit, testx, testy)
    if invalid:
        style += " /* INVALID */"
    test_file = open(test_filename, 'w')
    test_file.write(test_template % (imgfilename, style, style, imgfilename))
    test_file.close()
    # Re-use an existing reference page when the same (overflow, refstyle)
    # combination has already been emitted.
    refstyle = "%s; %s; %s" % (refx, refy, refdims)
    if [v for k, v in ref_hashes.iteritems() if k == overflow+refstyle] == []:
        ref_filename = "%s_%s_%s_%s-ref.html" % (overflow, fit, xx, yy)
        ref_hashes[overflow+refstyle] = ref_filename
        ref_file = open(ref_filename, 'w')
        ref_file.write(ref_template % (imgfilename, style, overflow, refstyle, imgfilename))
        ref_file.close()
    else:
        ref_filename = ref_hashes[overflow+refstyle]
    reftest_list += '== ' + test_filename + ' ' + ref_filename + '\n'
# Emit the manifest mapping each test to its reference.
list_file = open('reftest.list', 'w')
list_file.write(reftest_list)
list_file.close()
| 30.068027 | 101 | 0.500452 |
477ecbfb0e22de84f22778a42662ecd96182e4e9 | 34,235 | py | Python | integration-tests/clipper_admin_tests.py | Evan-JH-Kim/clipper | 85b508b2c2fdd6f6122a27648c8cdf5e69a751e9 | [
"Apache-2.0"
] | null | null | null | integration-tests/clipper_admin_tests.py | Evan-JH-Kim/clipper | 85b508b2c2fdd6f6122a27648c8cdf5e69a751e9 | [
"Apache-2.0"
] | null | null | null | integration-tests/clipper_admin_tests.py | Evan-JH-Kim/clipper | 85b508b2c2fdd6f6122a27648c8cdf5e69a751e9 | [
"Apache-2.0"
] | null | null | null | """
Executes a test suite consisting of two separate cases: short tests and long tests.
Before each case, an instance of Clipper is created. Tests
are then performed by invoking methods on this instance, often resulting
in the execution of docker commands.
"""
from __future__ import absolute_import, division, print_function
import unittest
import sys
import os
import json
import time
import requests
import tempfile
import shutil
import random
from argparse import ArgumentParser
import logging
from test_utils import get_docker_client, create_docker_connection, fake_model_data
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath('%s/../clipper_admin' % cur_dir))
import clipper_admin as cl
from clipper_admin.deployers.python import create_endpoint as create_py_endpoint
from clipper_admin.deployers.python import deploy_python_closure
from clipper_admin import __version__ as clipper_version, __registry__ as clipper_registry
from clipper_admin.container_manager import CLIPPER_DOCKER_LABEL
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class ClipperManagerTestCaseShort(unittest.TestCase):
    """Short-running Clipper admin tests.

    Each test gets a fresh Clipper cluster (created in ``setUp`` with a
    randomized name so concurrent runs do not collide) that is torn down in
    ``tearDown``, so the tests are independent of one another.
    """

    def setUp(self):
        # Randomize the cluster name so parallel test runs don't collide.
        new_name = "admin-test-cluster-{}".format(random.randint(0, 5000))
        self.clipper_conn = create_docker_connection(
            cleanup=False, start_clipper=True, new_name=new_name)
        self.name = new_name

    def tearDown(self):
        self.clipper_conn = create_docker_connection(
            cleanup=True, start_clipper=False, cleanup_name=self.name)

    def get_containers(self, container_name):
        """Return running containers of image ``container_name`` that belong
        to this test's cluster (filtered by the Clipper docker label)."""
        return get_docker_client().containers.list(filters={
            "ancestor": container_name,
            "label": "{key}={val}".format(
                key=CLIPPER_DOCKER_LABEL,
                val=self.clipper_conn.cm.cluster_name)
        })

    def test_register_model_correct(self):
        """Registering two versions of a model makes both retrievable."""
        input_type = "doubles"
        model_name = "m"
        self.clipper_conn.register_model(model_name, "v1", input_type)
        registered_model_info = self.clipper_conn.get_model_info(
            model_name, "v1")
        self.assertIsNotNone(registered_model_info)
        self.clipper_conn.register_model(model_name, "v2", input_type)
        registered_model_info = self.clipper_conn.get_model_info(
            model_name, "v2")
        self.assertIsNotNone(registered_model_info)

    def test_register_application_correct(self):
        """A registered application shows up in the app listing."""
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        app_name = "testapp"
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        registered_applications = self.clipper_conn.get_all_apps()
        self.assertGreaterEqual(len(registered_applications), 1)
        self.assertTrue(app_name in registered_applications)

    def test_link_not_registered_model_to_app_fails(self):
        """Linking an unregistered model name raises a ClipperException."""
        not_deployed_model = "test_model"
        app_name = "testapp"
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        with self.assertRaises(cl.ClipperException) as context:
            self.clipper_conn.link_model_to_app(app_name, not_deployed_model)
        self.assertTrue("No model with name" in str(context.exception))

    def test_delete_application_correct(self):
        """Deleting an application removes it from the app listing."""
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        app_name = "testapp"
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        self.clipper_conn.delete_application(app_name)
        registered_applications = self.clipper_conn.get_all_apps()
        self.assertEqual(len(registered_applications), 0)
        self.assertTrue(app_name not in registered_applications)

    def test_get_model_links_when_none_exist_returns_empty_list(self):
        """An app with no linked models reports an empty link list."""
        app_name = "testapp"
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        result = self.clipper_conn.get_linked_models(app_name)
        self.assertEqual([], result)

    def test_link_registered_model_to_app_succeeds(self):
        """Linking a registered model is reflected by get_linked_models."""
        # Register app
        app_name = "testapp"
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        # Register model
        model_name = "m"
        self.clipper_conn.register_model(model_name, "v1", input_type)
        self.clipper_conn.link_model_to_app(app_name, model_name)
        result = self.clipper_conn.get_linked_models(app_name)
        self.assertEqual([model_name], result)

    # NOTE: intentionally lacks the "test_" prefix; only executed via the
    # explicit SHORT_TEST_ORDERING list, not by unittest discovery.
    def get_app_info_for_registered_app_returns_info_dictionary(self):
        """get_app_info on a registered app returns a non-empty dict."""
        # Register app
        app_name = "testapp"
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        result = self.clipper_conn.get_app_info(app_name)
        self.assertIsNotNone(result)
        self.assertEqual(type(result), dict)

    # NOTE: intentionally lacks the "test_" prefix; only executed via the
    # explicit SHORT_TEST_ORDERING list, not by unittest discovery.
    def get_app_info_for_nonexistent_app_returns_none(self):
        """get_app_info on an unknown app name returns None."""
        result = self.clipper_conn.get_app_info("fake_app")
        self.assertIsNone(result)

    def test_set_num_replicas_for_external_model_fails(self):
        """Scaling a model that has no container raises a ClipperException."""
        # Register model
        model_name = "m"
        input_type = "doubles"
        version = "v1"
        self.clipper_conn.register_model(model_name, version, input_type)
        with self.assertRaises(cl.ClipperException) as context:
            self.clipper_conn.set_num_replicas(model_name, 5, version)
        self.assertTrue("containerless model" in str(context.exception))

    def test_model_version_sets_correctly(self):
        """set_model_version makes the chosen version the current one."""
        model_name = "m"
        input_type = "doubles"
        v1 = "v1"
        self.clipper_conn.register_model(model_name, v1, input_type)
        v2 = "v2"
        self.clipper_conn.register_model(model_name, v2, input_type)
        # Roll back from v2 (latest registered) to v1.
        self.clipper_conn.set_model_version(model_name, v1)
        all_models = self.clipper_conn.get_all_models(verbose=True)
        models_list_contains_correct_version = False
        for model_info in all_models:
            version = model_info["model_version"]
            if version == v1:
                models_list_contains_correct_version = True
                self.assertTrue(model_info["is_current_version"])
        self.assertTrue(models_list_contains_correct_version)

    def test_get_logs_creates_log_files(self):
        """get_clipper_logs writes at least one readable log file."""
        if not os.path.exists(cl.CLIPPER_TEMP_DIR):
            os.makedirs(cl.CLIPPER_TEMP_DIR)
        tmp_log_dir = tempfile.mkdtemp(dir=cl.CLIPPER_TEMP_DIR)
        log_file_names = self.clipper_conn.get_clipper_logs(
            logging_dir=tmp_log_dir)
        self.assertIsNotNone(log_file_names)
        self.assertGreaterEqual(len(log_file_names), 1)
        for file_name in log_file_names:
            self.assertTrue(os.path.isfile(file_name))
        # Remove temp files
        shutil.rmtree(tmp_log_dir)

    def test_inspect_instance_returns_json_dict(self):
        """inspect_instance returns a non-empty metrics dictionary."""
        metrics = self.clipper_conn.inspect_instance()
        self.assertEqual(type(metrics), dict)
        self.assertGreaterEqual(len(metrics), 1)

    def test_model_deploys_successfully(self):
        """build_and_deploy_model starts exactly one model container."""
        model_name = "m"
        version = "v1"
        container_name = "{}/noop-container:{}".format(clipper_registry,
                                                       clipper_version)
        input_type = "doubles"
        self.clipper_conn.build_and_deploy_model(
            model_name, version, input_type, fake_model_data, container_name)
        model_info = self.clipper_conn.get_model_info(model_name, version)
        self.assertIsNotNone(model_info)
        self.assertEqual(type(model_info), dict)
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), 1)

    def test_set_num_replicas_for_deployed_model_succeeds(self):
        """Scaling a deployed model up and down adjusts the replica count."""
        model_name = "set-num-reps-model"
        input_type = "doubles"
        version = "v1"
        container_name = "{}/noop-container:{}".format(clipper_registry,
                                                       clipper_version)
        self.clipper_conn.build_and_deploy_model(
            model_name, version, input_type, fake_model_data, container_name)
        # Version defaults to current version
        self.clipper_conn.set_num_replicas(model_name, 4)
        time.sleep(1)
        num_reps = self.clipper_conn.get_num_replicas(model_name, version)
        self.assertEqual(num_reps, 4)
        self.clipper_conn.set_num_replicas(model_name, 2, version)
        time.sleep(1)
        num_reps = self.clipper_conn.get_num_replicas(model_name, version)
        self.assertEqual(num_reps, 2)

    def test_remove_inactive_containers_succeeds(self):
        """stop_inactive_model_versions stops only the superseded version."""
        container_name = "{}/noop-container:{}".format(clipper_registry,
                                                       clipper_version)
        input_type = "doubles"
        model_name = "remove-inactive-test-model"
        self.clipper_conn.build_and_deploy_model(
            model_name,
            1,
            input_type,
            fake_model_data,
            container_name,
            num_replicas=2)
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), 2)
        self.clipper_conn.build_and_deploy_model(
            model_name,
            2,
            input_type,
            fake_model_data,
            container_name,
            num_replicas=3)
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), 5)
        # Version 2 is now current, so version 1's two replicas are inactive.
        self.clipper_conn.stop_inactive_model_versions([model_name])
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), 3)

    def test_stop_models(self):
        """Exercise the three model-stopping APIs (by name, by version, all)."""
        container_name = "{}/noop-container:{}".format(clipper_registry,
                                                       clipper_version)
        input_type = "doubles"
        mnames = ["jimmypage", "robertplant"]
        versions = ["i", "ii"]
        for model_name in mnames:
            for version in versions:
                self.clipper_conn.deploy_model(
                    model_name,
                    version,
                    input_type,
                    container_name,
                    num_replicas=1)
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), len(mnames) * len(versions))
        # stop all versions of jimmypage model
        self.clipper_conn.stop_models(mnames[:1])
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), len(mnames[1:]) * len(versions))
        # After calling this method, the remaining model should be robertplant:i
        self.clipper_conn.stop_versioned_models({
            "robertplant": ["ii"],
        })
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), 1)
        self.clipper_conn.stop_all_model_containers()
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), 0)

    def test_python_closure_deploys_successfully(self):
        """deploy_python_closure starts the closure container matching the
        interpreter version (2.7, 3.5 or 3.6)."""
        model_name = "m2"
        model_version = 1

        def predict_func(inputs):
            return ["0" for x in inputs]

        input_type = "doubles"
        deploy_python_closure(self.clipper_conn, model_name, model_version,
                              input_type, predict_func)
        model_info = self.clipper_conn.get_model_info(model_name,
                                                      model_version)
        self.assertIsNotNone(model_info)
        py_minor_version = (sys.version_info.major, sys.version_info.minor)
        if py_minor_version < (3, 0):
            containers = self.get_containers("{}/python-closure-container:{}".format(
                clipper_registry, clipper_version))
        elif py_minor_version == (3, 5):
            containers = self.get_containers("{}/python35-closure-container:{}".format(
                clipper_registry, clipper_version))
        elif py_minor_version == (3, 6):
            containers = self.get_containers("{}/python36-closure-container:{}".format(
                clipper_registry, clipper_version))
        else:
            msg = (
                "Python closure deployer only supports Python 2.7, 3.5, and 3.6. "
                "Detected {major}.{minor}").format(
                    major=sys.version_info.major, minor=sys.version_info.minor)
            logger.error(msg)
            # Fail explicitly: previously execution fell through to the
            # assertion below with `containers` undefined, raising a
            # confusing NameError instead of a test failure.
            self.fail(msg)
        self.assertGreaterEqual(len(containers), 1)

    def test_register_py_endpoint(self):
        """create_endpoint registers app + model, links them, and starts the
        version-appropriate closure container."""
        name = "py-closure-test"
        expected_version = 1

        def predict_func(inputs):
            return ["0" for x in inputs]

        input_type = "doubles"
        create_py_endpoint(self.clipper_conn, name, input_type, predict_func)
        registered_applications = self.clipper_conn.get_all_apps()
        self.assertEqual(len(registered_applications), 1)
        self.assertTrue(name in registered_applications)
        registered_model_info = self.clipper_conn.get_model_info(
            name, expected_version)
        self.assertIsNotNone(registered_model_info)
        linked_models = self.clipper_conn.get_linked_models(name)
        self.assertIsNotNone(linked_models)
        py_minor_version = (sys.version_info.major, sys.version_info.minor)
        if py_minor_version < (3, 0):
            containers = self.get_containers("{}/python-closure-container:{}".format(
                clipper_registry, clipper_version))
        elif py_minor_version == (3, 5):
            containers = self.get_containers("{}/python35-closure-container:{}".format(
                clipper_registry, clipper_version))
        elif py_minor_version == (3, 6):
            containers = self.get_containers("{}/python36-closure-container:{}".format(
                clipper_registry, clipper_version))
        else:
            msg = (
                "Python closure deployer only supports Python 2.7, 3.5, and 3.6. "
                "Detected {major}.{minor}").format(
                    major=sys.version_info.major, minor=sys.version_info.minor)
            logger.error(msg)
            # Fail explicitly instead of falling through to an assertion on
            # an undefined `containers` variable (NameError).
            self.fail(msg)
        self.assertEqual(len(containers), 1)

    def test_test_predict_function(self):
        """test_predict_function reproduces the deployed model's output for
        both single and batch queries."""

        def predict_func(xs):
            return [sum(x) for x in xs]

        self.clipper_conn.register_application(
            name="hello-world",
            input_type="doubles",
            default_output="-1.0",
            slo_micros=100000)
        deploy_python_closure(
            self.clipper_conn,
            name="sum-model",
            version=1,
            input_type="doubles",
            func=predict_func)
        self.clipper_conn.link_model_to_app(
            app_name="hello-world", model_name="sum-model")
        # Give the container time to register with Clipper before querying.
        time.sleep(60)
        addr = self.clipper_conn.get_query_addr()
        url = "http://{addr}/hello-world/predict".format(addr=addr)
        headers = {"Content-type": "application/json"}
        test_input = [1.1, 2.2, 3.3]
        pred = requests.post(
            url, headers=headers, data=json.dumps({
                "input": test_input
            })).json()
        test_predict_result = self.clipper_conn.test_predict_function(
            query={"input": test_input},
            func=predict_func,
            input_type="doubles")
        logger.info("test pred output {}".format(pred))
        self.assertEqual([pred['output']],
                         test_predict_result)  # tests single input
        test_batch_input = [[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]]
        batch_pred = requests.post(
            url,
            headers=headers,
            data=json.dumps({
                "input_batch": test_batch_input
            })).json()
        test_batch_predict_result = self.clipper_conn.test_predict_function(
            query={"input_batch": test_batch_input},
            func=predict_func,
            input_type="doubles")
        batch_predictions = batch_pred['batch_predictions']
        batch_pred_outputs = [batch['output'] for batch in batch_predictions]
        self.assertEqual(batch_pred_outputs,
                         test_batch_predict_result)  # tests batch input

    def test_query_specific_model_version(self):
        """Queries may pin a model version; unknown versions are rejected."""
        model_name = "testmodel"
        app_name = "testapp"

        def predict_func1(xs):
            return ["1" for _ in xs]

        def predict_func2(xs):
            return ["2" for _ in xs]

        self.clipper_conn.register_application(
            name=app_name,
            input_type="doubles",
            default_output="DEFAULT",
            slo_micros=100000)
        deploy_python_closure(
            self.clipper_conn,
            name=model_name,
            version="v1",
            input_type="doubles",
            func=predict_func1)
        self.clipper_conn.link_model_to_app(app_name, model_name)
        time.sleep(30)
        # v2 becomes the current version; v1 stays queryable by name.
        deploy_python_closure(
            self.clipper_conn,
            name=model_name,
            version="v2",
            input_type="doubles",
            func=predict_func2)
        time.sleep(60)
        addr = self.clipper_conn.get_query_addr()
        url = "http://{addr}/{app}/predict".format(addr=addr, app=app_name)
        headers = {"Content-type": "application/json"}
        test_input = [1.0, 2.0, 3.0]
        # Explicitly pinned to v1: expect predict_func1's output.
        pred1_raw = requests.post(
            url,
            headers=headers,
            data=json.dumps({
                "input": test_input,
                "version": "v1"
            }))
        try:
            pred1 = pred1_raw.json()
            self.assertFalse(pred1["default"])
            self.assertEqual(pred1['output'], 1)
        except ValueError:
            logger.error(pred1_raw.text)
            self.assertTrue(False)
        # No version given: expect the current version (v2).
        pred2_raw = requests.post(
            url, headers=headers, data=json.dumps({
                "input": test_input
            }))
        try:
            pred2 = pred2_raw.json()
            self.assertFalse(pred2["default"])
            self.assertEqual(pred2['output'], 2)
        except ValueError:
            logger.error(pred2_raw.text)
            self.assertTrue(False)
        # Query a version that doesn't exist:
        bad_version_name = 'skjfhkdjshfjksdhkjf'
        pred3 = requests.post(
            url,
            headers=headers,
            data=json.dumps({
                "input": test_input,
                "version": bad_version_name
            }))
        logger.info(pred3.text)
        self.assertFalse(pred3.status_code == requests.codes.ok)
        self.assertEqual(
            pred3.json()['cause'],
            "Requested version: {version_name} does not exist for model: {model_name}".
            format(version_name=bad_version_name, model_name=model_name))

    def test_build_model_with_custom_packages(self):
        """build_model installs extra pip packages into each closure image."""
        self.clipper_conn.build_model(
            "buildmodeltest",
            "py2",
            fake_model_data,
            "{}/python-closure-container:{}".format(clipper_registry,
                                                    clipper_version),
            None,
            pkgs_to_install=["sympy==1.1.*"])
        self.clipper_conn.build_model(
            "buildmodeltest",
            "py35",
            fake_model_data,
            "{}/python35-closure-container:{}".format(clipper_registry,
                                                      clipper_version),
            None,
            pkgs_to_install=["sympy==1.1.*"])
        # Fixed copy-paste bug: the "py36" model previously built on the
        # python35 closure container instead of the python36 one.
        self.clipper_conn.build_model(
            "buildmodeltest",
            "py36",
            fake_model_data,
            "{}/python36-closure-container:{}".format(clipper_registry,
                                                      clipper_version),
            None,
            pkgs_to_install=["sympy==1.1.*"])
class ClipperManagerTestCaseLong(unittest.TestCase):
    """Long-running end-to-end tests.

    One shared Clipper cluster is created in ``setUpClass`` and reused by
    every test (torn down in ``tearDownClass``), so the tests share app and
    model registrations; each test uses its own app/model name pair.
    """
    # Randomized so concurrent test runs do not collide on cluster names.
    cluster_name = "admin-l-{}".format(random.randint(0, 50000))
    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is
        # conventionally named ``cls``; here it is named ``self`` but still
        # receives the class object.
        self.clipper_conn = create_docker_connection(
            cleanup=False, start_clipper=True, new_name=self.cluster_name)
        # One app/model name pair per test; stored as class attributes so
        # all tests share the registrations made below.
        self.app_name_1 = "app3"
        self.app_name_2 = "app4"
        self.app_name_3 = "app5"
        self.app_name_4 = "app6"
        self.app_name_5 = "app7"
        self.model_name_1 = "m4"
        self.model_name_2 = "m5"
        self.model_name_3 = "m6"
        self.model_name_4 = "m7"
        self.model_name_5 = "m8"
        self.input_type = "doubles"
        self.default_output = "DEFAULT"
        self.latency_slo_micros = 30000
        self.clipper_conn.register_application(
            self.app_name_1, self.input_type, self.default_output,
            self.latency_slo_micros)
        self.clipper_conn.register_application(
            self.app_name_2, self.input_type, self.default_output,
            self.latency_slo_micros)
        self.clipper_conn.register_application(
            self.app_name_3, self.input_type, self.default_output,
            self.latency_slo_micros)
        # app6 gets a much larger SLO (30s) so the fixed-batch-size test's
        # deliberately slow model does not time out to default outputs.
        self.clipper_conn.register_application(
            self.app_name_4,
            self.input_type,
            self.default_output,
            slo_micros=30000000)
        self.clipper_conn.register_application(
            self.app_name_5, self.input_type, self.default_output,
            self.latency_slo_micros)
    @classmethod
    def tearDownClass(self):
        self.clipper_conn = create_docker_connection(
            cleanup=True, start_clipper=False, cleanup_name=self.cluster_name)
    def get_containers(self, container_name):
        # Running containers of the given image belonging to this cluster.
        return get_docker_client().containers.list(filters={
            "ancestor": container_name,
            "label": "{key}={val}".format(
                key=CLIPPER_DOCKER_LABEL,
                val=self.clipper_conn.cm.cluster_name)
        })
    def test_unlinked_app_returns_default_predictions(self):
        # An app with no linked model must serve its default output.
        addr = self.clipper_conn.get_query_addr()
        url = "http://{addr}/{app}/predict".format(
            addr=addr, app=self.app_name_2)
        test_input = [99.3, 18.9, 67.2, 34.2]
        req_json = json.dumps({'input': test_input})
        headers = {'Content-type': 'application/json'}
        response = requests.post(url, headers=headers, data=req_json)
        parsed_response = response.json()
        logger.info(parsed_response)
        self.assertEqual(parsed_response["output"], self.default_output)
        self.assertTrue(parsed_response["default"])
    def test_deployed_model_queried_successfully(self):
        # After deploying and linking a noop model, queries should return a
        # real (non-default) prediction.
        model_version = 1
        container_name = "{}/noop-container:{}".format(clipper_registry,
                                                       clipper_version)
        self.clipper_conn.build_and_deploy_model(
            self.model_name_2, model_version, self.input_type, fake_model_data,
            container_name)
        self.clipper_conn.link_model_to_app(self.app_name_2, self.model_name_2)
        # Give the container time to connect before querying.
        time.sleep(30)
        addr = self.clipper_conn.get_query_addr()
        url = "http://{addr}/{app}/predict".format(
            addr=addr, app=self.app_name_2)
        test_input = [99.3, 18.9, 67.2, 34.2]
        req_json = json.dumps({'input': test_input})
        headers = {'Content-type': 'application/json'}
        response = requests.post(url, headers=headers, data=req_json)
        parsed_response = response.json()
        logger.info(parsed_response)
        self.assertNotEqual(parsed_response["output"], self.default_output)
        self.assertFalse(parsed_response["default"])
    def test_batch_queries_returned_successfully(self):
        # A batch request must return one prediction per input.
        model_version = 1
        container_name = "{}/noop-container:{}".format(clipper_registry,
                                                       clipper_version)
        self.clipper_conn.build_and_deploy_model(
            self.model_name_3, model_version, self.input_type, fake_model_data,
            container_name)
        self.clipper_conn.link_model_to_app(self.app_name_3, self.model_name_3)
        time.sleep(30)
        addr = self.clipper_conn.get_query_addr()
        url = "http://{addr}/{app}/predict".format(
            addr=addr, app=self.app_name_3)
        test_input = [[99.3, 18.9, 67.2, 34.2], [101.1, 45.6, 98.0, 99.1],
                      [12.3, 6.7, 42.1, 12.6], [9.01, 87.6, 70.2, 19.6]]
        req_json = json.dumps({'input_batch': test_input})
        headers = {'Content-type': 'application/json'}
        response = requests.post(url, headers=headers, data=req_json)
        parsed_response = response.json()
        logger.info(parsed_response)
        self.assertEqual(
            len(parsed_response["batch_predictions"]), len(test_input))
    def test_deployed_python_closure_queried_successfully(self):
        # The closure returns the input length as a string, so the output is
        # directly checkable against len(test_input).
        model_version = 1
        def predict_func(inputs):
            return [str(len(x)) for x in inputs]
        input_type = "doubles"
        deploy_python_closure(self.clipper_conn, self.model_name_1,
                              model_version, input_type, predict_func)
        self.clipper_conn.link_model_to_app(self.app_name_1, self.model_name_1)
        time.sleep(60)
        received_non_default_prediction = False
        addr = self.clipper_conn.get_query_addr()
        url = "http://{addr}/{app}/predict".format(
            addr=addr, app=self.app_name_1)
        test_input = [101.1, 99.5, 107.2]
        req_json = json.dumps({'input': test_input})
        headers = {'Content-type': 'application/json'}
        # Retry (up to 40 times) until the model serves a real prediction;
        # early queries may get the default output while the container warms
        # up.
        for i in range(0, 40):
            response = requests.post(url, headers=headers, data=req_json)
            parsed_response = response.json()
            print(parsed_response)
            output = parsed_response["output"]
            if output == self.default_output:
                time.sleep(20)
            else:
                received_non_default_prediction = True
                self.assertEqual(int(output), len(test_input))
                break
        self.assertTrue(received_non_default_prediction)
    def test_fixed_batch_size_model_processes_specified_query_batch_size_when_saturated(
            self):
        # The closure reports the batch size it was handed; when saturated,
        # most batches should be exactly `fixed_batch_size`.
        model_version = 1
        def predict_func(inputs):
            # Slow the model down so queries queue up and batching kicks in.
            time.sleep(.5)
            batch_size = len(inputs)
            return [str(batch_size) for _ in inputs]
        fixed_batch_size = 9
        total_num_queries = fixed_batch_size * 50
        deploy_python_closure(
            self.clipper_conn,
            self.model_name_4,
            model_version,
            self.input_type,
            predict_func,
            batch_size=fixed_batch_size)
        self.clipper_conn.link_model_to_app(self.app_name_4, self.model_name_4)
        time.sleep(60)
        addr = self.clipper_conn.get_query_addr()
        url = "http://{addr}/{app}/predict".format(
            addr=addr, app=self.app_name_4)
        # Each input is slightly perturbed so queries are distinct.
        test_input = [[float(x) + (j * .001) for x in range(5)]
                      for j in range(total_num_queries)]
        req_json = json.dumps({'input_batch': test_input})
        headers = {'Content-type': 'application/json'}
        response = requests.post(url, headers=headers, data=req_json)
        parsed_response = response.json()
        num_max_batch_queries = 0
        for prediction in parsed_response["batch_predictions"]:
            batch_size = prediction["output"]
            if batch_size != self.default_output and int(
                    batch_size) == fixed_batch_size:
                num_max_batch_queries += 1
        # Allow some slack (70%) for partial batches at the start/end.
        self.assertGreaterEqual(num_max_batch_queries,
                                int(total_num_queries * .7))
    def test_remove_inactive_container(self):
        # Scale a model 2 -> 1 -> 0 replicas, verifying container counts and
        # query behavior at each step.
        container_name = "{}/noop-container:{}".format(clipper_registry,
                                                       clipper_version)
        self.clipper_conn.build_and_deploy_model(
            self.model_name_5,
            1,
            self.input_type,
            fake_model_data,
            container_name,
            num_replicas=2)
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), 2)
        self.clipper_conn.link_model_to_app(self.app_name_5, self.model_name_5)
        time.sleep(30)
        # We now have 2 replicas running, both the same model name and Version
        # send predictions, assert that we are getting correct response
        addr = self.clipper_conn.get_query_addr()
        test_input = [101.1, 99.5, 107.2]
        req_json = json.dumps({'input': test_input})
        headers = {'Content-type': 'application/json'}
        for i in range(2):
            response = requests.post(
                "http://%s/%s/predict" % (addr, self.app_name_5),
                headers=headers,
                data=req_json)
            result = response.json()
            self.assertEqual(response.status_code, requests.codes.ok)
            self.assertEqual(result["default"], False)
        # one of the containers should go inactive
        self.clipper_conn.set_num_replicas(
            name=self.model_name_5, version=1, num_replicas=1)
        time.sleep(100)
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), 1)
        test_input = [101.1, 99.9]
        req_json = json.dumps({'input': test_input})
        #send predictions, should still be working
        for i in range(2):
            response = requests.post(
                "http://%s/%s/predict" % (addr, self.app_name_5),
                headers=headers,
                data=req_json)
            result = response.json()
            self.assertEqual(response.status_code, requests.codes.ok)
            self.assertEqual(result["default"], False)
        #2nd container should go inactive
        self.clipper_conn.set_num_replicas(
            name=self.model_name_5, version=1, num_replicas=0)
        time.sleep(100)
        containers = self.get_containers(container_name)
        self.assertEqual(len(containers), 0)
        test_input = [101.1]
        req_json = json.dumps({'input': test_input})
        #send predictions, should be getting response with message 'no connected models'
        for i in range(2):
            response = requests.post(
                "http://%s/%s/predict" % (addr, self.app_name_5),
                headers=headers,
                data=req_json)
            result = response.json()
            self.assertEqual(result["default"], True)
            self.assertEqual(result["default_explanation"],
                             "No connected models found for query")
# Explicit execution order for the short suite. Tests are added to the
# unittest suite in this order (see the __main__ block) rather than being
# discovered alphabetically.
# NOTE(review): the two "get_app_info_*" entries lack the "test_" prefix, so
# they only run via this list and would be skipped by normal unittest
# discovery.
SHORT_TEST_ORDERING = [
    'test_register_model_correct',
    'test_register_application_correct',
    'test_link_not_registered_model_to_app_fails',
    'test_get_model_links_when_none_exist_returns_empty_list',
    'test_link_registered_model_to_app_succeeds',
    'get_app_info_for_registered_app_returns_info_dictionary',
    'get_app_info_for_nonexistent_app_returns_none',
    'test_set_num_replicas_for_external_model_fails',
    'test_model_version_sets_correctly',
    'test_get_logs_creates_log_files',
    'test_inspect_instance_returns_json_dict',
    'test_model_deploys_successfully',
    'test_set_num_replicas_for_deployed_model_succeeds',
    'test_remove_inactive_containers_succeeds',
    'test_stop_models',
    'test_python_closure_deploys_successfully',
    'test_register_py_endpoint',
    'test_test_predict_function',
    'test_build_model_with_custom_packages',
    'test_delete_application_correct',
    'test_query_specific_model_version',
]
# Explicit execution order for the long (shared-cluster) suite.
LONG_TEST_ORDERING = [
    'test_remove_inactive_container',
    'test_unlinked_app_returns_default_predictions',
    'test_deployed_model_queried_successfully',
    'test_batch_queries_returned_successfully',
    'test_deployed_python_closure_queried_successfully',
    'test_fixed_batch_size_model_processes_specified_query_batch_size_when_saturated'
]
if __name__ == '__main__':
    description = (
        "Runs clipper manager tests. If no arguments are specified, all tests are "
        "executed.")
    # Pass the description as a keyword argument: ArgumentParser's first
    # positional parameter is ``prog`` (the program name), so the previous
    # ``ArgumentParser(description)`` set the program name instead of the
    # --help description text.
    parser = ArgumentParser(description=description)
    parser.add_argument(
        "-s",
        "--short",
        action="store_true",
        dest="run_short",
        help="Run the short suite of test cases")
    parser.add_argument(
        "-l",
        "--long",
        action="store_true",
        dest="run_long",
        help="Run the long suite of test cases")
    parser.add_argument(
        "-a",
        "--all",
        action="store_true",
        dest="run_all",
        help="Run all test cases")
    args = parser.parse_args()

    # If neither the short nor the long argument is specified,
    # we will run all tests
    args.run_all = args.run_all or ((not args.run_short) and
                                    (not args.run_long))

    # Build the suite in the explicit orderings defined above.
    suite = unittest.TestSuite()
    if args.run_short or args.run_all:
        for test in SHORT_TEST_ORDERING:
            suite.addTest(ClipperManagerTestCaseShort(test))
    if args.run_long or args.run_all:
        for test in LONG_TEST_ORDERING:
            suite.addTest(ClipperManagerTestCaseLong(test))
    # failfast stops on the first failure; exit status 0 iff all passed.
    result = unittest.TextTestRunner(verbosity=2, failfast=True).run(suite)
    sys.exit(not result.wasSuccessful())
| 38.552928 | 90 | 0.623076 |
01567cead6a49028215e38c5750da5add571280f | 3,788 | py | Python | graph_inference/solver/greedysolver.py | codyhan94/epidemic-graph-inference | 4b9f4cf76dd427bbc182e0f6d8fad8ce127172c0 | [
"MIT"
] | null | null | null | graph_inference/solver/greedysolver.py | codyhan94/epidemic-graph-inference | 4b9f4cf76dd427bbc182e0f6d8fad8ce127172c0 | [
"MIT"
] | null | null | null | graph_inference/solver/greedysolver.py | codyhan94/epidemic-graph-inference | 4b9f4cf76dd427bbc182e0f6d8fad8ce127172c0 | [
"MIT"
] | null | null | null | from pdb import set_trace
from graph_inference.solver.basesolver import BaseSolver
from collections import Counter
import numpy as np
import networkx as nx
class GreedySolver(BaseSolver):
    """A simple greedy algorithm for determining parental neighborhoods.

    The algorithm comes from Netrapalli's paper: for each node, repeatedly
    add the candidate that was infected exactly one timestep earlier in the
    largest number of still-unexplained cascades, until every cascade is
    explained.
    """

    def __init__(self, cascades):
        """
        Sets up an instance of the greedy solver with a list of cascades.

        :param cascades: numpy array of cascades; entry [i, j] is the
            infection time of node j in cascade i (0 for seed nodes, inf for
            nodes that were never infected)
        :return: None
        """
        super(GreedySolver, self).__init__(cascades)
        # Boolean mask used to "remove" cascades without copying the main
        # cascades array: a cascade stays active while its entry is True.
        # (np.ones(..., dtype=bool) replaces the removed np.bool alias.)
        self.remaining_cascades = np.ones(len(cascades), dtype=bool)

    def solve_node(self, node):
        """
        Computes the parental neighborhood for a particular node.

        :param node: integer index of node
        :return: list of integer indices of the inferred parent nodes
        """
        parents = []
        self.remaining_cascades.fill(True)
        potential_parents = Counter()
        while any(self.remaining_cascades):
            # Cascades explained this iteration (node was a seed, was never
            # infected, or has at least one candidate infector); they are
            # deactivated for the next iteration.
            accounted_cascades = []
            for i, alive in enumerate(self.remaining_cascades):
                if not alive:
                    continue
                cascade = self.cascades[i]
                # This was a seed node or was never infected; can't have
                # anyone else infect it.
                if cascade[node] == 0 or np.isinf(cascade[node]):
                    accounted_cascades.append(i)
                    continue
                # Nodes infected one timestep before me could have infected
                # me.
                # NOTE(review): np.isfinite(cascade[node]) is a scalar that
                # is always True at this point (the inf case was handled
                # above); an elementwise np.isfinite(cascade) may have been
                # intended, though the equality test already excludes inf.
                possible_infectors = np.where(
                    (cascade == cascade[node] - 1) &
                    (np.isfinite(cascade[node])))[0]
                if len(possible_infectors) > 0:
                    accounted_cascades.append(i)
                    potential_parents.update(possible_infectors)
            # Greedy step: the node infected one timestep before me in the
            # largest number of observed cascades joins my parent
            # neighborhood.
            if potential_parents:
                parent = potential_parents.most_common(1)[0][0]
                parents.append(parent)
            # Remove all cascades where node j was infected one step before
            # me.
            self.remaining_cascades[accounted_cascades] = False
            potential_parents.clear()
        return parents

    def solve_graph(self, out_file=None):
        """
        Applies the greedy algorithm to each node in the graph.

        Produces a solution that can optionally be written out to a file. If
        no file is specified, then it returns a networkx DiGraph.

        :param out_file: optional path for a GraphML dump of the result
        :return: nx.DiGraph, or None when out_file is given
        """
        parent_map = {}
        # Solve for each node one at a time (columns index nodes).
        for node in range(self.cascades.shape[1]):
            parent_map[node] = self.solve_node(node)
        # Create the DiGraph object to either return or write to a file.
        nx_graph = nx.DiGraph(parent_map)
        # Reverse the graph in-place to get infection edges (parent -> node)
        # from the parental neighborhoods (node -> parents).
        # NOTE: setting copy=False in this function is NOT sufficient!
        nx_graph = nx_graph.reverse()
        if out_file:
            nx.write_graphml(nx_graph, path=out_file)
            return
        return nx_graph
| 35.735849 | 79 | 0.606125 |
e03336bb4d3a985046951faada01906906d2503e | 2,092 | py | Python | pystatsm/tests/lvcorr_test.py | lukepinkel/pystatsm | baa9078a73ab32ec21347aea65555a3f81f10149 | [
"MIT"
] | null | null | null | pystatsm/tests/lvcorr_test.py | lukepinkel/pystatsm | baa9078a73ab32ec21347aea65555a3f81f10149 | [
"MIT"
] | null | null | null | pystatsm/tests/lvcorr_test.py | lukepinkel/pystatsm | baa9078a73ab32ec21347aea65555a3f81f10149 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 9 14:09:39 2022
@author: lukepinkel
"""
import numpy as np
from pystatsm.pylvcorr.sim_lvcorr import LVCorrSim
from pystatsm.pylvcorr.lvcorr import Polychoric, Polyserial
from pystatsm.utilities.numerical_derivs import fo_fc_cd, so_gc_cd
def test_polychoric():
    """Validate the polychoric model's analytic gradient and Hessian against
    finite-difference approximations at the fitted rho and at rho=0.2."""
    rng = np.random.default_rng(1234)
    corr = np.array([[1.0, 0.5],
                     [0.5, 1.0]])
    sim = LVCorrSim(corr_mat=corr, x_bins=5, y_bins=3, rng=rng)
    x, y = sim.simulate(1000)
    model = Polychoric(x=x, y=y)
    model.fit()

    def grad(r):
        return np.atleast_1d(model.gradient(r))

    def hess(r):
        return np.atleast_2d(model.hessian(r))

    # Check both the optimum and an off-optimum point.
    for point in (np.atleast_1d(model.rho_hat), np.array([0.2])):
        fd_grad = fo_fc_cd(model.loglike, point)
        assert np.allclose(fd_grad, grad(point), atol=1e-6, rtol=1e-4)
        fd_hess = so_gc_cd(grad, point)
        assert np.allclose(fd_hess, hess(point), atol=1e-6, rtol=1e-4)
    assert model.optimizer.success
def test_polyserial():
    """Validate the polyserial model's analytic gradient and Hessian against
    finite-difference approximations at the fitted rho and at rho=0.2."""
    rng = np.random.default_rng(1234)
    corr = np.array([[1.0, 0.5],
                     [0.5, 1.0]])
    sim = LVCorrSim(corr_mat=corr, x_bins=5, y_bins=False, rng=rng)
    x, y = sim.simulate(1000)
    model = Polyserial(x=x, y=y)
    model.fit()

    def grad(r):
        return np.atleast_1d(model.gradient(r))

    def hess(r):
        return np.atleast_2d(model.hessian(r))

    # Check both the optimum and an off-optimum point.
    for point in (np.atleast_1d(model.rho_hat), np.array([0.2])):
        fd_grad = fo_fc_cd(model.loglike, point)
        assert np.allclose(fd_grad, grad(point), atol=1e-6, rtol=1e-4)
        fd_hess = so_gc_cd(grad, point)
        assert np.allclose(fd_hess, hess(point), atol=1e-6, rtol=1e-4)
    assert model.optimizer.success
| 32.184615 | 93 | 0.663002 |
e4bb7224a410cb1bc79fb2b1cf9d6ca8119c27d7 | 6,988 | py | Python | synapse/config/appservice.py | nordeck/synapse | 637df95de63196033a6da4a6e286e1d58ea517b6 | [
"Apache-2.0"
] | 1 | 2021-05-10T09:44:01.000Z | 2021-05-10T09:44:01.000Z | synapse/config/appservice.py | t2bot/synapse | 62ca554ef09330cb88d46fca8296a859d0adc143 | [
"Apache-2.0"
] | null | null | null | synapse/config/appservice.py | t2bot/synapse | 62ca554ef09330cb88d46fca8296a859d0adc143 | [
"Apache-2.0"
] | 1 | 2021-01-29T01:37:46.000Z | 2021-01-29T01:37:46.000Z | # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, List
from urllib import parse as urlparse
import yaml
from netaddr import IPSet
from synapse.appservice import ApplicationService
from synapse.types import JsonDict, UserID
from ._base import Config, ConfigError
logger = logging.getLogger(__name__)
class AppServiceConfig(Config):
    """Parses the ``appservice`` section of the homeserver config."""

    section = "appservice"

    def read_config(self, config, **kwargs) -> None:
        # Paths of application-service registration YAML files.
        self.app_service_config_files = config.get("app_service_config_files", [])
        # Whether to push events/queries to registered appservices.
        self.notify_appservices = config.get("notify_appservices", True)
        # Tracking AS user IPs implicitly enables MAU tracking for AS users.
        self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)

    # NOTE(review): takes ``cls`` but carries no @classmethod decorator in
    # this chunk -- presumably invoked via the base Config's config-generation
    # machinery; confirm against ``Config.generate_config``.
    def generate_config_section(cls, **kwargs) -> str:
        return """\
        # A list of application service config files to use
        #
        #app_service_config_files:
        #  - app_service_1.yaml
        #  - app_service_2.yaml

        # Uncomment to enable tracking of application service IP addresses. Implicitly
        # enables MAU tracking for application service users.
        #
        #track_appservice_user_ips: true
        """
def load_appservices(
    hostname: str, config_files: List[str]
) -> List[ApplicationService]:
    """Returns a list of Application Services from the config files."""
    if not isinstance(config_files, list):
        logger.warning("Expected %s to be a list of AS config files.", config_files)
        return []

    # Record the file each ID / as_token came from so a duplicate can be
    # reported together with both offending filenames.
    seen_as_tokens: Dict[str, str] = {}
    seen_ids: Dict[str, str] = {}

    appservices = []
    for config_file in config_files:
        try:
            with open(config_file) as f:
                raw_config = yaml.safe_load(f)
            appservice = _load_appservice(hostname, raw_config, config_file)

            if appservice.id in seen_ids:
                raise ConfigError(
                    "Cannot reuse ID across application services: "
                    "%s (files: %s, %s)"
                    % (appservice.id, config_file, seen_ids[appservice.id])
                )
            seen_ids[appservice.id] = config_file

            if appservice.token in seen_as_tokens:
                raise ConfigError(
                    "Cannot reuse as_token across application services: "
                    "%s (files: %s, %s)"
                    % (
                        appservice.token,
                        config_file,
                        seen_as_tokens[appservice.token],
                    )
                )
            seen_as_tokens[appservice.token] = config_file

            logger.info("Loaded application service: %s", appservice)
            appservices.append(appservice)
        except Exception as e:
            # Log which file failed before re-raising for the caller.
            logger.error("Failed to load appservice from '%s'", config_file)
            logger.exception(e)
            raise
    return appservices
def _load_appservice(
    hostname: str, as_info: JsonDict, config_filename: str
) -> ApplicationService:
    """Validate one parsed registration file and build an ApplicationService.

    Args:
        hostname: this homeserver's server name (used to qualify the sender).
        as_info: the parsed YAML registration.
        config_filename: the source file, named in error messages.

    Raises:
        KeyError/ValueError: if a required field is missing or mistyped.
    """
    required_string_fields = ["id", "as_token", "hs_token", "sender_localpart"]
    for field in required_string_fields:
        if not isinstance(as_info.get(field), str):
            raise KeyError(
                "Required string field: '%s' (%s)" % (field, config_filename)
            )

    # 'url' must either be a string or explicitly null, not missing
    # to avoid accidentally turning off push for ASes.
    if not isinstance(as_info.get("url"), str) and as_info.get("url", "") is not None:
        raise KeyError(
            "Required string field or explicit null: 'url' (%s)" % (config_filename,)
        )

    localpart = as_info["sender_localpart"]
    if urlparse.quote(localpart) != localpart:
        raise ValueError("sender_localpart needs characters which are not URL encoded.")
    user = UserID(localpart, hostname)
    user_id = user.to_string()

    # Rate limiting for users of this AS is on by default (excludes sender)
    rate_limited = as_info.get("rate_limited")
    if not isinstance(rate_limited, bool):
        rate_limited = True

    # namespace checks
    if not isinstance(as_info.get("namespaces"), dict):
        raise KeyError("Requires 'namespaces' object.")
    for ns in ApplicationService.NS_LIST:
        # specific namespaces are optional
        if ns in as_info["namespaces"]:
            # expect a list of dicts with exclusive and regex keys
            for regex_obj in as_info["namespaces"][ns]:
                # BUGFIX: these ValueErrors previously passed the format
                # arguments logging-style, so "%s" was never interpolated
                # and the exception carried a raw tuple.  Use explicit
                # %-formatting so the message names the bad entry.
                if not isinstance(regex_obj, dict):
                    raise ValueError(
                        "Expected namespace entry in %s to be an object, but got %s"
                        % (ns, regex_obj)
                    )
                if not isinstance(regex_obj.get("regex"), str):
                    raise ValueError(
                        "Missing/bad type 'regex' key in %s" % (regex_obj,)
                    )
                if not isinstance(regex_obj.get("exclusive"), bool):
                    raise ValueError(
                        "Missing/bad type 'exclusive' key in %s" % (regex_obj,)
                    )

    # protocols check
    protocols = as_info.get("protocols")
    if protocols:
        # Because strings are lists in python
        if isinstance(protocols, str) or not isinstance(protocols, list):
            raise KeyError("Optional 'protocols' must be a list if present.")
        for p in protocols:
            if not isinstance(p, str):
                raise KeyError("Bad value for 'protocols' item")

    if as_info["url"] is None:
        logger.info(
            "(%s) Explicitly empty 'url' provided. This application service"
            " will not receive events or queries.",
            config_filename,
        )

    ip_range_whitelist = None
    if as_info.get("ip_range_whitelist"):
        ip_range_whitelist = IPSet(as_info.get("ip_range_whitelist"))

    # Unstable MSC2409 flag: whether the AS wants ephemeral events pushed.
    supports_ephemeral = as_info.get("de.sorunome.msc2409.push_ephemeral", False)

    return ApplicationService(
        token=as_info["as_token"],
        hostname=hostname,
        url=as_info["url"],
        namespaces=as_info["namespaces"],
        hs_token=as_info["hs_token"],
        sender=user_id,
        id=as_info["id"],
        supports_ephemeral=supports_ephemeral,
        protocols=protocols,
        rate_limited=rate_limited,
        ip_range_whitelist=ip_range_whitelist,
    )
| 38.185792 | 88 | 0.617487 |
ace32ad48024bf33296c6a4a972fb33c11032244 | 2,387 | py | Python | configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | _base_ = [
    '../../_base_/models/ocr_mask_rcnn_r50_fpn_ohem_poly.py',
    '../../_base_/schedules/schedule_160e.py', '../../_base_/runtime_10e.py'
]

dataset_type = 'IcdarDataset'
data_root = 'data/ctw1500/'

# ImageNet mean/std matching the pretrained ResNet-50 backbone.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# img_norm_cfg = dict(mean=[0, 0, 0], std=[1, 1, 1], to_rgb=True)

train_pipeline = [
    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    # Scale jitter: per 'indep_sample_in_range', the target scale is sampled
    # in [640, 2560] (aspect ratio not preserved, keep_ratio=False).
    dict(
        type='ScaleAspectJitter',
        img_scale=None,
        keep_ratio=False,
        resize_type='indep_sample_in_range',
        scale_range=(640, 2560)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    # Crop a fixed 640x640 training window around instance masks.
    dict(
        type='RandomCropInstances',
        target_size=(640, 640),
        mask_type='union_all',
        instance_key='gt_masks'),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]

test_pipeline = [
    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    dict(
        type='MultiScaleFlipAug',
        # resize the long size to 1600
        img_scale=(1600, 1600),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            # no flip
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    # Evaluation runs one image at a time.
    val_dataloader=dict(samples_per_gpu=1),
    test_dataloader=dict(samples_per_gpu=1),
    train=dict(
        type=dataset_type,
        ann_file=data_root + '/instances_training.json',
        img_prefix=data_root + '/imgs',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        # select_first_k=1,
        ann_file=data_root + '/instances_test.json',
        img_prefix=data_root + '/imgs',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        # select_first_k=1,
        ann_file=data_root + '/instances_test.json',
        img_prefix=data_root + '/imgs',
        pipeline=test_pipeline))

# Text-detection hmean-IoU, evaluated every 10 epochs.
evaluation = dict(interval=10, metric='hmean-iou')
ce50ab6d59bbc6c98edaa79debd16b6ea94c7ae1 | 1,742 | py | Python | tests/test_app.py | SkymindIO/policy-server | 167fcca8a6b808f70a9e9dff25d04eb54be73601 | [
"Apache-2.0"
] | null | null | null | tests/test_app.py | SkymindIO/policy-server | 167fcca8a6b808f70a9e9dff25d04eb54be73601 | [
"Apache-2.0"
] | null | null | null | tests/test_app.py | SkymindIO/policy-server | 167fcca8a6b808f70a9e9dff25d04eb54be73601 | [
"Apache-2.0"
] | null | null | null | import ray
from fastapi.testclient import TestClient
from app import app
from generate import CLI
# Set up server
# Stage the example's policy-server files so ``app`` can load them on import.
CLI.copy_server_files("examples/mouse_and_cheese")
# NOTE(review): this module-level client is created outside a ``with`` block,
# so app startup/shutdown events presumably do not run for it -- tests that
# need the served model (test_predict, test_clients) open their own context.
client = TestClient(app)
def test_health_check():
    """The root endpoint answers 200 with a plain "ok" body."""
    resp = client.get("/")
    assert resp.status_code == 200
    assert resp.json() == "ok"
def test_docs():
    """Swagger UI is served at /docs."""
    assert client.get("/docs").status_code == 200
def test_redoc():
    """ReDoc is served at /redoc."""
    assert client.get("/redoc").status_code == 200
# Observation payload for POST /predict/.  NOTE(review): field names are
# assumed to match the generated observation schema of the mouse-and-cheese
# example -- confirm against the generated app's request model.
payload = {
    "mouse_row": 1,
    "mouse_col": 1,
    "mouse_row_distance": 1,
    "mouse_col_distance": 1,
    "cheese_row": 1,
    "cheese_col": 1,
}
def test_predict_no_auth():
    """Requests missing the access-token header are rejected with 403."""
    resp = client.post("http://localhost:8000/predict/", json=payload)
    assert resp.status_code == 403
def test_predict_bad_observations():
    """A payload that fails schema validation yields 422 even when authed."""
    resp = client.post(
        "http://localhost:8000/predict/",
        json={"bad_payload": 1},
        headers={"access-token": "1234567asdfgh"},
    )
    assert resp.status_code == 422
def test_predict():
    """A well-formed, authenticated request returns one action + probability."""
    # Use a context-managed client so the app's startup events run.
    with TestClient(app) as session:
        resp = session.post(
            "http://localhost:8000/predict/",
            json=payload,
            headers={"access-token": "1234567asdfgh"},
        )
        assert resp.status_code == 200
        body = resp.json()
        assert body
        assert len(body["actions"]) == 1
        assert body["probability"] >= 0
    ray.shutdown()
def test_clients():
    """The /clients endpoint is reachable with a valid token."""
    with TestClient(app) as session:
        resp = session.get(
            "http://localhost:8000/clients",
            headers={"access-token": "1234567asdfgh"},
        )
        assert resp.status_code == 200
    ray.shutdown()
| 23.540541 | 74 | 0.629736 |
c5333c27498927fbb68263641578a51baa1e4534 | 1,346 | py | Python | Algorithms on Strings/week2/_529f54ab2b45544c6c1f6b380b6cdeff_Programming-Assignment-2/bwmatching/bwmatching.py | DuyTungHa/Algorithms-and-Data-Structures | d8c40f9788f255ac28ab2f5a6cf7be73f8435412 | [
"MIT"
] | 26 | 2019-05-18T09:59:02.000Z | 2022-01-09T01:04:10.000Z | Algorithms on Strings/week2/_529f54ab2b45544c6c1f6b380b6cdeff_Programming-Assignment-2/bwmatching/bwmatching.py | DuyTungHa/Algorithms-and-Data-Structures | d8c40f9788f255ac28ab2f5a6cf7be73f8435412 | [
"MIT"
] | null | null | null | Algorithms on Strings/week2/_529f54ab2b45544c6c1f6b380b6cdeff_Programming-Assignment-2/bwmatching/bwmatching.py | DuyTungHa/Algorithms-and-Data-Structures | d8c40f9788f255ac28ab2f5a6cf7be73f8435412 | [
"MIT"
] | 8 | 2020-01-19T15:45:07.000Z | 2021-01-09T08:51:41.000Z | # python3
import sys
def PreprocessBWT(bwt):
    """Build the lookup tables used by Burrows-Wheeler backward search.

    Returns (starts, occ_counts_before) where starts[c] is the first row of
    the sorted BWT at which character c appears, and occ_counts_before[c][i]
    is the number of occurrences of c in bwt[:i].
    """
    alphabet = 'ACTG$'
    totals = {'A': 0, 'G': 0, 'C': 0, 'T': 0, '$': 0}
    occ_counts_before = {ch: {0: 0} for ch in alphabet}

    # One left-to-right pass records running counts after every prefix.
    for i, ch in enumerate(bwt):
        totals[ch] += 1
        for key in occ_counts_before:
            occ_counts_before[key][i + 1] = totals[key]

    # First-column start offsets: cumulative counts in sorted character order.
    starts = {}
    offset = 0
    for ch, n in sorted(totals.items()):
        if n:
            starts[ch] = offset
            offset += n
    return starts, occ_counts_before
def CountOccurrences(pattern, bwt, starts, occ_counts_before):
    """Count occurrences of `pattern` in the text whose BWT is `bwt`.

    Standard backward search: [top, bottom] is the inclusive row range of
    suffixes currently matching the processed suffix of the pattern; each
    pattern character (right to left) narrows the range via the
    first-occurrence and rank tables from PreprocessBWT.
    """
    top, bottom = 0, len(bwt) - 1
    for symbol in reversed(pattern):
        if top > bottom:
            return 0
        if symbol not in bwt[top:bottom + 1]:
            return 0
        rank_above = occ_counts_before[symbol][top]
        rank_through = occ_counts_before[symbol][bottom + 1]
        top = starts[symbol] + rank_above
        bottom = starts[symbol] + rank_through - 1
    if top > bottom:
        return 0
    return bottom - top + 1
if __name__ == '__main__':
    # Input format: line 1 = BWT string, line 2 = number of patterns,
    # line 3 = whitespace-separated patterns.
    bwt = sys.stdin.readline().strip()
    pattern_count = int(sys.stdin.readline().strip())  # read but otherwise unused
    patterns = sys.stdin.readline().strip().split()
    starts, occ_counts_before = PreprocessBWT(bwt)
    occurrence_counts = []
    for pattern in patterns:
        occurrence_counts.append(CountOccurrences(pattern, bwt, starts, occ_counts_before))
    # One count per pattern, space-separated, in input order.
    print(' '.join(map(str, occurrence_counts)))
| 26.392157 | 87 | 0.631501 |
70501a9106babc948b13894c3f63033293f4f73d | 3,032 | py | Python | mythril/support/opcodes.py | jhutchings1/mythril | 00d445ace1496afb54ee0936e1779216883f714a | [
"MIT"
] | 1 | 2020-09-26T07:42:03.000Z | 2020-09-26T07:42:03.000Z | mythril/support/opcodes.py | strawberrylady99/mythril | 727d5f3049333f71ccd90a95ca8fe13368aa9c15 | [
"MIT"
] | null | null | null | mythril/support/opcodes.py | strawberrylady99/mythril | 727d5f3049333f71ccd90a95ca8fe13368aa9c15 | [
"MIT"
] | 1 | 2020-08-28T01:17:16.000Z | 2020-08-28T01:17:16.000Z | # This pyethereum opcodes file with added opcodes
from typing import Dict, Tuple
# Each entry maps opcode byte -> (mnemonic, stack inputs, stack outputs,
# base gas).  Trailing comments record post-fork gas repricings.
opcodes = {
    0x00: ("STOP", 0, 0, 0),
    0x01: ("ADD", 2, 1, 3),
    0x02: ("MUL", 2, 1, 5),
    0x03: ("SUB", 2, 1, 3),
    0x04: ("DIV", 2, 1, 5),
    0x05: ("SDIV", 2, 1, 5),
    0x06: ("MOD", 2, 1, 5),
    0x07: ("SMOD", 2, 1, 5),
    0x08: ("ADDMOD", 3, 1, 8),
    0x09: ("MULMOD", 3, 1, 8),
    0x0A: ("EXP", 2, 1, 10),
    0x0B: ("SIGNEXTEND", 2, 1, 5),
    0x10: ("LT", 2, 1, 3),
    0x11: ("GT", 2, 1, 3),
    0x12: ("SLT", 2, 1, 3),
    0x13: ("SGT", 2, 1, 3),
    0x14: ("EQ", 2, 1, 3),
    0x15: ("ISZERO", 1, 1, 3),
    0x16: ("AND", 2, 1, 3),
    0x17: ("OR", 2, 1, 3),
    0x18: ("XOR", 2, 1, 3),
    0x19: ("NOT", 1, 1, 3),
    0x1A: ("BYTE", 2, 1, 3),
    0x1B: ("SHL", 2, 1, 3),
    0x1C: ("SHR", 2, 1, 3),
    0x1D: ("SAR", 2, 1, 3),
    0x20: ("SHA3", 2, 1, 30),
    0x30: ("ADDRESS", 0, 1, 2),
    0x31: ("BALANCE", 1, 1, 20),  # now 400
    0x32: ("ORIGIN", 0, 1, 2),
    0x33: ("CALLER", 0, 1, 2),
    0x34: ("CALLVALUE", 0, 1, 2),
    0x35: ("CALLDATALOAD", 1, 1, 3),
    0x36: ("CALLDATASIZE", 0, 1, 2),
    0x37: ("CALLDATACOPY", 3, 0, 3),
    0x38: ("CODESIZE", 0, 1, 2),
    0x39: ("CODECOPY", 3, 0, 3),
    0x3A: ("GASPRICE", 0, 1, 2),
    0x3B: ("EXTCODESIZE", 1, 1, 20),  # now 700
    0x3C: ("EXTCODECOPY", 4, 0, 20),  # now 700
    0x3D: ("RETURNDATASIZE", 0, 1, 2),
    0x3E: ("RETURNDATACOPY", 3, 0, 3),
    # BUGFIX: previously (3, 0, 3) -- a copy of RETURNDATACOPY's arity.
    # Per EIP-1052, EXTCODEHASH pops one address and pushes one hash,
    # at 400 gas (Constantinople; 700 after Istanbul).
    0x3F: ("EXTCODEHASH", 1, 1, 400),
    0x40: ("BLOCKHASH", 1, 1, 20),
    0x41: ("COINBASE", 0, 1, 2),
    0x42: ("TIMESTAMP", 0, 1, 2),
    0x43: ("NUMBER", 0, 1, 2),
    0x44: ("DIFFICULTY", 0, 1, 2),
    0x45: ("GASLIMIT", 0, 1, 2),
    0x46: ("CHAINID", 0, 1, 2),
    0x47: ("SELFBALANCE", 0, 1, 5),
    0x50: ("POP", 1, 0, 2),
    0x51: ("MLOAD", 1, 1, 3),
    0x52: ("MSTORE", 2, 0, 3),
    0x53: ("MSTORE8", 2, 0, 3),
    0x54: ("SLOAD", 1, 1, 50),  # 200 now
    0x55: ("SSTORE", 2, 0, 0),
    0x56: ("JUMP", 1, 0, 8),
    0x57: ("JUMPI", 2, 0, 10),
    0x58: ("PC", 0, 1, 2),
    0x59: ("MSIZE", 0, 1, 2),
    0x5A: ("GAS", 0, 1, 2),
    0x5B: ("JUMPDEST", 0, 0, 1),
    0xA0: ("LOG0", 2, 0, 375),
    0xA1: ("LOG1", 3, 0, 750),
    0xA2: ("LOG2", 4, 0, 1125),
    0xA3: ("LOG3", 5, 0, 1500),
    0xA4: ("LOG4", 6, 0, 1875),
    0xF0: ("CREATE", 3, 1, 32000),
    0xF1: ("CALL", 7, 1, 40),  # 700 now
    0xF2: ("CALLCODE", 7, 1, 40),  # 700 now
    0xF3: ("RETURN", 2, 0, 0),
    0xF4: ("DELEGATECALL", 6, 1, 40),  # 700 now
    0xF5: ("CREATE2", 3, 1, 32000),
    0xFA: ("STATICCALL", 6, 1, 40),
    0xFD: ("REVERT", 2, 0, 0),
    0xFF: ("SUICIDE", 1, 0, 0),  # 5000 now
}  # type: Dict[int, Tuple[str, int, int, int]]

# Opcodes introduced in the Metropolis (Byzantium) fork.
opcodesMetropolis = {0x3D, 0x3E, 0xFA, 0xFD}

# PUSH1..PUSH32 occupy 0x60-0x7F: no stack inputs, one output.
for i in range(1, 33):
    opcodes[0x5F + i] = ("PUSH" + str(i), 0, 1, 3)

# DUP1..DUP16 (0x80-0x8F) and SWAP1..SWAP16 (0x90-0x9F).
for i in range(1, 17):
    opcodes[0x7F + i] = ("DUP" + str(i), i, i + 1, 3)
    opcodes[0x8F + i] = ("SWAP" + str(i), i + 1, i + 1, 3)

reverse_opcodes = {}
for o in opcodes:
    # Expose each mnemonic as a module-level name (e.g. ``ADD``) and build
    # the mnemonic -> opcode-byte reverse map.
    vars()[opcodes[o][0]] = opcodes[o]
    reverse_opcodes[opcodes[o][0]] = o
| 31.257732 | 58 | 0.460092 |
62c3189bf4790830f9a25c125516faf2e334316d | 832 | py | Python | examples/inverse/snr_estimate.py | stevemats/mne-python | 47051833f21bb372d60afc3adbf4305648ac7f69 | [
"BSD-3-Clause"
] | 1,953 | 2015-01-17T20:33:46.000Z | 2022-03-30T04:36:34.000Z | examples/inverse/snr_estimate.py | LiFeng-SECUC/mne-python | 732bb1f994e64e41a8e95dcc10dc98c22cac95c0 | [
"BSD-3-Clause"
] | 8,490 | 2015-01-01T13:04:18.000Z | 2022-03-31T23:02:08.000Z | examples/inverse/snr_estimate.py | LiFeng-SECUC/mne-python | 732bb1f994e64e41a8e95dcc10dc98c22cac95c0 | [
"BSD-3-Clause"
] | 1,130 | 2015-01-08T22:39:27.000Z | 2022-03-30T21:44:26.000Z | # -*- coding: utf-8 -*-
"""
==================================
Estimate data SNR using an inverse
==================================
This estimates the SNR as a function of time for a set of data
using a minimum-norm inverse operator.
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
# %%
from os import path as op
from mne.datasets.sample import data_path
from mne.minimum_norm import read_inverse_operator
from mne import read_evokeds
from mne.viz import plot_snr_estimate
print(__doc__)
data_dir = op.join(data_path(), 'MEG', 'sample')
fname_inv = op.join(data_dir, 'sample_audvis-meg-oct-6-meg-inv.fif')
fname_evoked = op.join(data_dir, 'sample_audvis-ave.fif')
inv = read_inverse_operator(fname_inv)
evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
plot_snr_estimate(evoked, inv)
| 25.212121 | 68 | 0.692308 |
f3db4617d838e5a2a0a00c5911102b5b46a54cf9 | 522 | py | Python | 00_simple_examples/09_compress.py | tlananthu/python-learning | cfda5bfa6c613bcbe8bfe00567cd058ce5afc4a2 | [
"Apache-2.0"
] | 1 | 2020-05-11T18:39:54.000Z | 2020-05-11T18:39:54.000Z | 00_simple_examples/09_compress.py | tlananthu/python-learning | cfda5bfa6c613bcbe8bfe00567cd058ce5afc4a2 | [
"Apache-2.0"
] | null | null | null | 00_simple_examples/09_compress.py | tlananthu/python-learning | cfda5bfa6c613bcbe8bfe00567cd058ce5afc4a2 | [
"Apache-2.0"
] | null | null | null | # from itertools import compress
# data = range(10)
# print(list(data))
# even_selector = [1, 0] * 10
# odd_selector = [0, 1] * 10
# print(even_selector)
# even_numbers = list(compress(data, even_selector))
# #odd_numbers = list(compress(data, odd_selector))
# print(even_numbers)
# # print(odd_selector)
# # print(list(data))
# # print(even_numbers)
# # print(odd_numbers)
from itertools import compress
data = range(10)
even_selector = [1, 0] * 10
even_numbers = list(compress(data, even_selector))
print(even_numbers)
| 26.1 | 52 | 0.718391 |
4af1082cc551c03303f68a3e51512f88b069a5b1 | 27,147 | py | Python | gcloud/test__helpers.py | waprin/google-cloud-python | 4456c707e7b915c28d5fdaf5771d203185830dd3 | [
"Apache-2.0"
] | null | null | null | gcloud/test__helpers.py | waprin/google-cloud-python | 4456c707e7b915c28d5fdaf5771d203185830dd3 | [
"Apache-2.0"
] | null | null | null | gcloud/test__helpers.py | waprin/google-cloud-python | 4456c707e7b915c28d5fdaf5771d203185830dd3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class Test__LocalStack(unittest2.TestCase):
    """Tests for the thread-local stack helper ``_LocalStack``."""

    def _getTargetClass(self):
        from gcloud._helpers import _LocalStack
        return _LocalStack

    def _makeOne(self):
        return self._getTargetClass()()

    def test_it(self):
        # Exercise push/pop/top/iteration in one LIFO round trip.
        batch1, batch2 = object(), object()
        batches = self._makeOne()
        self.assertEqual(list(batches), [])
        self.assertTrue(batches.top is None)
        batches.push(batch1)
        self.assertTrue(batches.top is batch1)
        batches.push(batch2)
        self.assertTrue(batches.top is batch2)
        popped = batches.pop()
        self.assertTrue(popped is batch2)
        self.assertTrue(batches.top is batch1)
        self.assertEqual(list(batches), [batch1])
        popped = batches.pop()
        self.assertTrue(batches.top is None)
        self.assertEqual(list(batches), [])
class Test__UTC(unittest2.TestCase):
    """Tests for the fallback UTC tzinfo implementation ``_UTC``."""

    def _getTargetClass(self):
        from gcloud._helpers import _UTC
        return _UTC

    def _makeOne(self):
        return self._getTargetClass()()

    def test_module_property(self):
        from gcloud import _helpers as MUT
        klass = self._getTargetClass()
        try:
            import pytz
        except ImportError:
            # Without pytz the module-level UTC must be the fallback class.
            self.assertTrue(isinstance(MUT.UTC, klass))
        else:
            # With pytz installed the module re-exports pytz.UTC.
            self.assertIs(MUT.UTC, pytz.UTC)  # pragma: NO COVER

    def test_dst(self):
        import datetime
        tz = self._makeOne()
        self.assertEqual(tz.dst(None), datetime.timedelta(0))

    def test_fromutc(self):
        import datetime
        naive_epoch = datetime.datetime.utcfromtimestamp(0)
        self.assertEqual(naive_epoch.tzinfo, None)
        tz = self._makeOne()
        epoch = tz.fromutc(naive_epoch)
        # fromutc() must attach the tzinfo to a naive datetime.
        self.assertEqual(epoch.tzinfo, tz)

    def test_tzname(self):
        tz = self._makeOne()
        self.assertEqual(tz.tzname(None), 'UTC')

    def test_utcoffset(self):
        import datetime
        tz = self._makeOne()
        self.assertEqual(tz.utcoffset(None), datetime.timedelta(0))

    def test___repr__(self):
        tz = self._makeOne()
        self.assertEqual(repr(tz), '<UTC>')

    def test___str__(self):
        tz = self._makeOne()
        self.assertEqual(str(tz), 'UTC')
class Test__ensure_tuple_or_list(unittest2.TestCase):
    """Tests for the tuple/list argument coercion helper."""

    def _callFUT(self, arg_name, tuple_or_list):
        from gcloud._helpers import _ensure_tuple_or_list
        return _ensure_tuple_or_list(arg_name, tuple_or_list)

    def test_valid_tuple(self):
        # Tuples come back converted to lists.
        valid_tuple_or_list = ('a', 'b', 'c', 'd')
        result = self._callFUT('ARGNAME', valid_tuple_or_list)
        self.assertEqual(result, ['a', 'b', 'c', 'd'])

    def test_valid_list(self):
        valid_tuple_or_list = ['a', 'b', 'c', 'd']
        result = self._callFUT('ARGNAME', valid_tuple_or_list)
        self.assertEqual(result, valid_tuple_or_list)

    def test_invalid(self):
        invalid_tuple_or_list = object()
        with self.assertRaises(TypeError):
            self._callFUT('ARGNAME', invalid_tuple_or_list)

    def test_invalid_iterable(self):
        # Other iterables (e.g. str) must still be rejected.
        invalid_tuple_or_list = 'FOO'
        with self.assertRaises(TypeError):
            self._callFUT('ARGNAME', invalid_tuple_or_list)
class Test__app_engine_id(unittest2.TestCase):
    """Tests for ``_app_engine_id`` (App Engine app-identity lookup)."""

    def _callFUT(self):
        from gcloud._helpers import _app_engine_id
        return _app_engine_id()

    def test_no_value(self):
        from gcloud._testing import _Monkey
        from gcloud import _helpers
        # No app_identity module available -> no application ID.
        with _Monkey(_helpers, app_identity=None):
            dataset_id = self._callFUT()
        self.assertEqual(dataset_id, None)

    def test_value_set(self):
        from gcloud._testing import _Monkey
        from gcloud import _helpers
        APP_ENGINE_ID = object()
        # _AppIdentity is a stub defined elsewhere in this test module.
        APP_IDENTITY = _AppIdentity(APP_ENGINE_ID)
        with _Monkey(_helpers, app_identity=APP_IDENTITY):
            dataset_id = self._callFUT()
        self.assertEqual(dataset_id, APP_ENGINE_ID)
class Test__get_credentials_file_project_id(unittest2.TestCase):
    """Tests for reading project_id from GOOGLE_APPLICATION_CREDENTIALS."""

    def setUp(self):
        import os
        # Remember any pre-existing credentials path so tearDown can restore.
        self.old_env = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')

    def tearDown(self):
        import os
        if (not self.old_env and
                'GOOGLE_APPLICATION_CREDENTIALS' in os.environ):
            del os.environ['GOOGLE_APPLICATION_CREDENTIALS']

    def _callFUT(self):
        from gcloud._helpers import _file_project_id
        return _file_project_id()

    def test_success(self):
        import os
        from gcloud._testing import _NamedTemporaryFile

        with _NamedTemporaryFile() as temp:
            with open(temp.name, mode='w') as creds_file:
                creds_file.write('{"project_id": "test-project-id"}')
                creds_file.seek(0)

            os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = creds_file.name
            self.assertEqual('test-project-id', self._callFUT())

    def test_no_environment(self):
        self.assertEqual(None, self._callFUT())
class Test__get_default_service_project_id(unittest2.TestCase):
    """Tests for reading the project ID from the gcloud CLI config file."""

    config_path = '.config/gcloud/configurations/'
    config_file = 'config_default'
    temp_APPDATA = ''

    def setUp(self):
        import tempfile
        import os
        self.temp_config_path = tempfile.mkdtemp()
        # On Windows the CLI config lives under %APPDATA%; redirect it into
        # the temporary directory for the duration of the test.
        self.temp_APPDATA = os.getenv('APPDATA')
        if self.temp_APPDATA:  # pragma: NO COVER Windows
            os.environ['APPDATA'] = self.temp_config_path
        self.config_path = os.path.join(os.getenv('APPDATA', '~/.config'),
                                        'gcloud', 'configurations')
        conf_path = os.path.join(self.temp_config_path, self.config_path)
        os.makedirs(conf_path)
        self.temp_config_file = os.path.join(conf_path, self.config_file)
        with open(self.temp_config_file, 'w') as conf_file:
            conf_file.write('[core]\nproject = test-project-id')

    def tearDown(self):
        import shutil
        import os
        if os.path.exists(self.temp_config_path):
            shutil.rmtree(self.temp_config_path)
        if self.temp_APPDATA:  # pragma: NO COVER Windows
            os.environ['APPDATA'] = self.temp_APPDATA

    def callFUT(self, project_id=None):
        import os
        from gcloud._helpers import _default_service_project_id
        from gcloud._testing import _Monkey

        # Route expanduser() to the temp config file (or '' when unset).
        def mock_expanduser(path=None):
            if project_id and path:
                __import__('pwd')  # Simulate actual expanduser imports.
                return self.temp_config_file
            return ''

        with _Monkey(os.path, expanduser=mock_expanduser):
            return _default_service_project_id()

    def test_read_from_cli_info(self):
        project_id = self.callFUT('test-project-id')
        self.assertEqual('test-project-id', project_id)

    def test_gae_without_expanduser(self):
        import sys
        import shutil
        shutil.rmtree(self.temp_config_path)
        try:
            # Block the ``pwd`` import to mimic the GAE sandbox.
            sys.modules['pwd'] = None  # Blocks pwd from being imported.
            project_id = self.callFUT('test-project-id')
            self.assertEqual(None, project_id)
        finally:
            del sys.modules['pwd']  # Unblocks importing of pwd.

    def test_info_value_not_present(self):
        import shutil
        shutil.rmtree(self.temp_config_path)
        project_id = self.callFUT()
        self.assertEqual(None, project_id)
class Test__compute_engine_id(unittest2.TestCase):
    """Tests for ``_compute_engine_id`` (GCE metadata-server lookup)."""

    def _callFUT(self):
        from gcloud._helpers import _compute_engine_id
        return _compute_engine_id()

    def _monkeyConnection(self, connection):
        from gcloud._testing import _Monkey
        from gcloud import _helpers

        # Capture host/timeout on the stub and hand it back in place of a
        # real HTTPConnection.
        def _factory(host, timeout):
            connection.host = host
            connection.timeout = timeout
            return connection

        return _Monkey(_helpers, HTTPConnection=_factory)

    def test_bad_status(self):
        # _HTTPConnection is a stub defined elsewhere in this test module.
        connection = _HTTPConnection(404, None)
        with self._monkeyConnection(connection):
            dataset_id = self._callFUT()
            self.assertEqual(dataset_id, None)

    def test_success(self):
        COMPUTE_ENGINE_ID = object()
        connection = _HTTPConnection(200, COMPUTE_ENGINE_ID)
        with self._monkeyConnection(connection):
            dataset_id = self._callFUT()
            self.assertEqual(dataset_id, COMPUTE_ENGINE_ID)

    def test_socket_raises(self):
        # A timeout from the metadata server means "not running on GCE".
        connection = _TimeoutHTTPConnection()
        with self._monkeyConnection(connection):
            dataset_id = self._callFUT()
            self.assertEqual(dataset_id, None)
class Test__get_production_project(unittest2.TestCase):
    """Tests for reading the project from the environment variable."""

    def _callFUT(self):
        from gcloud._helpers import _get_production_project
        return _get_production_project()

    def test_no_value(self):
        import os
        from gcloud._testing import _Monkey
        environ = {}
        # With an empty environment the lookup yields None.
        with _Monkey(os, getenv=environ.get):
            project = self._callFUT()
            self.assertEqual(project, None)

    def test_value_set(self):
        import os
        from gcloud._testing import _Monkey
        from gcloud._helpers import PROJECT
        MOCK_PROJECT = object()
        environ = {PROJECT: MOCK_PROJECT}
        with _Monkey(os, getenv=environ.get):
            project = self._callFUT()
            self.assertEqual(project, MOCK_PROJECT)
class Test__determine_default_project(unittest2.TestCase):
    """Tests for the project-ID fallback chain in ``_determine_default_project``."""

    def _callFUT(self, project=None):
        from gcloud._helpers import _determine_default_project
        return _determine_default_project(project=project)

    def _determine_default_helper(self, prod=None, gae=None, gce=None,
                                  file_id=None, srv_id=None, project=None):
        # Patch every detection helper and record the order in which the
        # implementation consults them.
        from gcloud._testing import _Monkey
        from gcloud import _helpers

        _callers = []

        def prod_mock():
            _callers.append('prod_mock')
            return prod

        def file_id_mock():
            _callers.append('file_id_mock')
            return file_id

        def srv_id_mock():
            _callers.append('srv_id_mock')
            return srv_id

        def gae_mock():
            _callers.append('gae_mock')
            return gae

        def gce_mock():
            _callers.append('gce_mock')
            return gce

        patched_methods = {
            '_get_production_project': prod_mock,
            '_file_project_id': file_id_mock,
            '_default_service_project_id': srv_id_mock,
            '_app_engine_id': gae_mock,
            '_compute_engine_id': gce_mock,
        }

        with _Monkey(_helpers, **patched_methods):
            returned_project = self._callFUT(project)

        return returned_project, _callers

    def test_no_value(self):
        # Nothing found anywhere: all five detectors consulted, in order.
        project, callers = self._determine_default_helper()
        self.assertEqual(project, None)
        self.assertEqual(callers, ['prod_mock', 'file_id_mock', 'srv_id_mock',
                                   'gae_mock', 'gce_mock'])

    def test_explicit(self):
        # An explicit project short-circuits all detection.
        PROJECT = object()
        project, callers = self._determine_default_helper(project=PROJECT)
        self.assertEqual(project, PROJECT)
        self.assertEqual(callers, [])

    def test_prod(self):
        PROJECT = object()
        project, callers = self._determine_default_helper(prod=PROJECT)
        self.assertEqual(project, PROJECT)
        self.assertEqual(callers, ['prod_mock'])

    def test_gae(self):
        PROJECT = object()
        project, callers = self._determine_default_helper(gae=PROJECT)
        self.assertEqual(project, PROJECT)
        self.assertEqual(callers, ['prod_mock', 'file_id_mock',
                                   'srv_id_mock', 'gae_mock'])

    def test_gce(self):
        PROJECT = object()
        project, callers = self._determine_default_helper(gce=PROJECT)
        self.assertEqual(project, PROJECT)
        self.assertEqual(callers, ['prod_mock', 'file_id_mock', 'srv_id_mock',
                                   'gae_mock', 'gce_mock'])
class Test__millis(unittest2.TestCase):
    """Unit tests for ``gcloud._helpers._millis``."""

    def _callFUT(self, value):
        from gcloud._helpers import _millis
        return _millis(value)

    def test_one_second_from_epoch(self):
        import datetime
        from gcloud._helpers import UTC
        # One second past the epoch is exactly 1000 milliseconds.
        moment = datetime.datetime(1970, 1, 1, second=1, tzinfo=UTC)
        self.assertEqual(self._callFUT(moment), 1000)
class Test__microseconds_from_datetime(unittest2.TestCase):
    """Unit tests for ``gcloud._helpers._microseconds_from_datetime``."""

    def _callFUT(self, value):
        from gcloud._helpers import _microseconds_from_datetime
        return _microseconds_from_datetime(value)

    def test_it(self):
        import datetime
        # Just past the epoch, total microseconds equal the datetime's
        # microsecond field.
        expected = 314159
        stamp = datetime.datetime(1970, 1, 1, microsecond=expected)
        self.assertEqual(self._callFUT(stamp), expected)
class Test__millis_from_datetime(unittest2.TestCase):
    """Tests for ``_millis_from_datetime`` with None/aware/naive inputs."""

    def _callFUT(self, value):
        from gcloud._helpers import _millis_from_datetime
        return _millis_from_datetime(value)

    def test_w_none(self):
        # None passes straight through.
        self.assertTrue(self._callFUT(None) is None)

    def test_w_utc_datetime(self):
        import datetime
        import six
        from gcloud._helpers import UTC
        from gcloud._helpers import _microseconds_from_datetime
        NOW = datetime.datetime.utcnow().replace(tzinfo=UTC)
        NOW_MICROS = _microseconds_from_datetime(NOW)
        MILLIS = NOW_MICROS // 1000
        result = self._callFUT(NOW)
        self.assertTrue(isinstance(result, six.integer_types))
        self.assertEqual(result, MILLIS)

    def test_w_non_utc_datetime(self):
        import datetime
        import six
        from gcloud._helpers import _UTC
        from gcloud._helpers import _microseconds_from_datetime

        class CET(_UTC):
            # Fixed-offset zone; offset sign follows _UTC's convention in
            # gcloud._helpers (NOTE: confirm against the helper class).
            _tzname = 'CET'
            _utcoffset = datetime.timedelta(hours=-1)

        zone = CET()
        NOW = datetime.datetime(2015, 7, 28, 16, 34, 47, tzinfo=zone)
        NOW_MICROS = _microseconds_from_datetime(NOW)
        MILLIS = NOW_MICROS // 1000
        result = self._callFUT(NOW)
        self.assertTrue(isinstance(result, six.integer_types))
        self.assertEqual(result, MILLIS)

    def test_w_naive_datetime(self):
        import datetime
        import six
        from gcloud._helpers import UTC
        from gcloud._helpers import _microseconds_from_datetime
        # Naive datetimes are interpreted as UTC.
        NOW = datetime.datetime.utcnow()
        UTC_NOW = NOW.replace(tzinfo=UTC)
        UTC_NOW_MICROS = _microseconds_from_datetime(UTC_NOW)
        MILLIS = UTC_NOW_MICROS // 1000
        result = self._callFUT(NOW)
        self.assertTrue(isinstance(result, six.integer_types))
        self.assertEqual(result, MILLIS)
class Test__datetime_from_microseconds(unittest2.TestCase):
    """Unit tests for ``gcloud._helpers._datetime_from_microseconds``."""

    def _callFUT(self, value):
        from gcloud._helpers import _datetime_from_microseconds
        return _datetime_from_microseconds(value)

    def test_it(self):
        import datetime
        from gcloud._helpers import UTC
        from gcloud._helpers import _microseconds_from_datetime
        # Round trip: datetime -> microseconds -> datetime is the identity.
        moment = datetime.datetime(2015, 7, 29, 17, 45, 21, 123456,
                                   tzinfo=UTC)
        as_micros = _microseconds_from_datetime(moment)
        self.assertEqual(self._callFUT(as_micros), moment)
class Test__total_seconds_backport(unittest2.TestCase):
    """Unit tests for ``gcloud._helpers._total_seconds_backport``."""

    def _callFUT(self, *args, **kwargs):
        from gcloud._helpers import _total_seconds_backport
        return _total_seconds_backport(*args, **kwargs)

    def test_it(self):
        import datetime
        # 3 s + 140 ms should come back as the float 3.14.
        delta = datetime.timedelta(seconds=3, microseconds=140000)
        self.assertEqual(self._callFUT(delta), 3.14)
class Test__total_seconds(unittest2.TestCase):
    """Unit tests for ``gcloud._helpers._total_seconds``."""

    def _callFUT(self, *args, **kwargs):
        from gcloud._helpers import _total_seconds
        return _total_seconds(*args, **kwargs)

    def test_it(self):
        import datetime
        # 1 s + 414 ms should come back as the float 1.414.
        delta = datetime.timedelta(seconds=1, microseconds=414000)
        self.assertEqual(self._callFUT(delta), 1.414)
class Test__rfc3339_to_datetime(unittest2.TestCase):
    """Tests for ``_rfc3339_to_datetime`` (microsecond-precision parser)."""

    def _callFUT(self, dt_str):
        from gcloud._helpers import _rfc3339_to_datetime
        return _rfc3339_to_datetime(dt_str)

    def test_w_bogus_zone(self):
        year = 2009
        month = 12
        day = 17
        hour = 12
        minute = 44
        seconds = 32
        micros = 123456789
        dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%06dBOGUS' % (
            year, month, day, hour, minute, seconds, micros)
        # Trailing junk instead of the 'Z' suffix must be rejected.
        with self.assertRaises(ValueError):
            self._callFUT(dt_str)

    def test_w_microseconds(self):
        import datetime
        from gcloud._helpers import UTC
        year = 2009
        month = 12
        day = 17
        hour = 12
        minute = 44
        seconds = 32
        micros = 123456
        dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%06dZ' % (
            year, month, day, hour, minute, seconds, micros)
        result = self._callFUT(dt_str)
        expected_result = datetime.datetime(
            year, month, day, hour, minute, seconds, micros, UTC)
        self.assertEqual(result, expected_result)

    def test_w_naonseconds(self):
        year = 2009
        month = 12
        day = 17
        hour = 12
        minute = 44
        seconds = 32
        nanos = 123456789
        dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%09dZ' % (
            year, month, day, hour, minute, seconds, nanos)
        # Nine fractional digits exceed microsecond precision -> ValueError.
        with self.assertRaises(ValueError):
            self._callFUT(dt_str)
class Test__rfc3339_nanos_to_datetime(unittest2.TestCase):
    """Tests for parsing RFC 3339 strings carrying up to nine fractional digits."""

    def _callFUT(self, dt_str):
        from gcloud._helpers import _rfc3339_nanos_to_datetime
        return _rfc3339_nanos_to_datetime(dt_str)

    def test_w_bogus_zone(self):
        # A trailing zone designator other than 'Z' must be rejected.
        year = 2009
        month = 12
        day = 17
        hour = 12
        minute = 44
        seconds = 32
        micros = 123456789
        dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%06dBOGUS' % (
            year, month, day, hour, minute, seconds, micros)
        with self.assertRaises(ValueError):
            self._callFUT(dt_str)

    def test_w_truncated_nanos(self):
        import datetime
        from gcloud._helpers import UTC
        year = 2009
        month = 12
        day = 17
        hour = 12
        minute = 44
        seconds = 32
        # Pairs of (fractional-digit string, expected microseconds): short
        # strings are zero-padded on the right, long ones truncated.
        truncateds_and_micros = [
            ('12345678', 123456),
            ('1234567', 123456),
            ('123456', 123456),
            ('12345', 123450),
            ('1234', 123400),
            ('123', 123000),
            ('12', 120000),
            ('1', 100000),
        ]
        for truncated, micros in truncateds_and_micros:
            dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%sZ' % (
                year, month, day, hour, minute, seconds, truncated)
            result = self._callFUT(dt_str)
            expected_result = datetime.datetime(
                year, month, day, hour, minute, seconds, micros, UTC)
            self.assertEqual(result, expected_result)

    # NOTE(review): "naonseconds" is a typo for "nanoseconds"; name kept
    # as-is to preserve the test id.
    def test_w_naonseconds(self):
        import datetime
        from gcloud._helpers import UTC
        year = 2009
        month = 12
        day = 17
        hour = 12
        minute = 44
        seconds = 32
        nanos = 123456789
        # Nanoseconds are truncated (not rounded) to microseconds.
        micros = nanos // 1000
        dt_str = '%d-%02d-%02dT%02d:%02d:%02d.%09dZ' % (
            year, month, day, hour, minute, seconds, nanos)
        result = self._callFUT(dt_str)
        expected_result = datetime.datetime(
            year, month, day, hour, minute, seconds, micros, UTC)
        self.assertEqual(result, expected_result)
class Test__datetime_to_rfc3339(unittest2.TestCase):
    """Formatting of timezone-aware datetimes as RFC 3339 strings."""

    def _callFUT(self, value):
        from gcloud._helpers import _datetime_to_rfc3339
        return _datetime_to_rfc3339(value)

    def test_it(self):
        import datetime
        from gcloud._helpers import UTC
        # Fixed instant: 2009-12-17T12:44:32.123456Z.
        year, month, day = 2009, 12, 17
        hour, minute, seconds, micros = 12, 44, 32, 123456
        to_convert = datetime.datetime(
            year, month, day, hour, minute, seconds, micros, UTC)
        expected = '%d-%02d-%02dT%02d:%02d:%02d.%06dZ' % (
            year, month, day, hour, minute, seconds, micros)
        self.assertEqual(self._callFUT(to_convert), expected)
class Test__to_bytes(unittest2.TestCase):
    """Coercion of assorted values to ``bytes``."""

    def _callFUT(self, *args, **kwargs):
        from gcloud._helpers import _to_bytes
        return _to_bytes(*args, **kwargs)

    def test_with_bytes(self):
        # Bytes pass through untouched.
        payload = b'bytes-val'
        self.assertEqual(self._callFUT(payload), payload)

    def test_with_unicode(self):
        # ASCII text encodes with the default codec.
        self.assertEqual(self._callFUT(u'string-val'), b'string-val')

    def test_unicode_non_ascii(self):
        # Long hyphen: not encodable as ASCII, fine as UTF-8.
        value = u'\u2013'
        self.assertRaises(UnicodeEncodeError, self._callFUT, value)
        self.assertEqual(
            self._callFUT(value, encoding='utf-8'), b'\xe2\x80\x93')

    def test_with_nonstring_type(self):
        # Non-text values are rejected outright.
        self.assertRaises(TypeError, self._callFUT, object())
class Test__bytes_to_unicode(unittest2.TestCase):
    """Coercion of assorted values to text."""

    def _callFUT(self, *args, **kwargs):
        from gcloud._helpers import _bytes_to_unicode
        return _bytes_to_unicode(*args, **kwargs)

    def test_with_bytes(self):
        # ASCII bytes decode to the equivalent text.
        self.assertEqual(self._callFUT(b'bytes-val'), 'bytes-val')

    def test_with_unicode(self):
        # Text passes through unchanged.
        self.assertEqual(self._callFUT(u'string-val'), 'string-val')

    def test_with_nonstring_type(self):
        # Non-string values are rejected with ValueError.
        self.assertRaises(ValueError, self._callFUT, object())
class Test__pb_timestamp_to_datetime(unittest2.TestCase):
    """Conversion of protobuf ``Timestamp`` messages to aware datetimes."""

    def _callFUT(self, timestamp):
        from gcloud._helpers import _pb_timestamp_to_datetime
        return _pb_timestamp_to_datetime(timestamp)

    def test_it(self):
        import datetime
        from google.protobuf.timestamp_pb2 import Timestamp
        from gcloud._helpers import UTC
        # Epoch is midnight on January 1, 1970 ...
        dt_stamp = datetime.datetime(1970, month=1, day=1, hour=0,
                                     minute=1, second=1, microsecond=1234,
                                     tzinfo=UTC)
        # ... so 1 minute and 1 second after is 61 seconds and 1234
        # microseconds is 1234000 nanoseconds.
        timestamp = Timestamp(seconds=61, nanos=1234000)
        self.assertEqual(self._callFUT(timestamp), dt_stamp)
class Test__datetime_to_pb_timestamp(unittest2.TestCase):
    """Conversion of aware datetimes to protobuf ``Timestamp`` messages."""

    def _callFUT(self, when):
        from gcloud._helpers import _datetime_to_pb_timestamp
        return _datetime_to_pb_timestamp(when)

    def test_it(self):
        import datetime
        from google.protobuf.timestamp_pb2 import Timestamp
        from gcloud._helpers import UTC
        # Epoch is midnight on January 1, 1970 ...
        dt_stamp = datetime.datetime(1970, month=1, day=1, hour=0,
                                     minute=1, second=1, microsecond=1234,
                                     tzinfo=UTC)
        # ... so 1 minute and 1 second after is 61 seconds and 1234
        # microseconds is 1234000 nanoseconds.
        timestamp = Timestamp(seconds=61, nanos=1234000)
        self.assertEqual(self._callFUT(dt_stamp), timestamp)
class Test__name_from_project_path(unittest2.TestCase):
    """Tests for ``_name_from_project_path`` resource-path parsing."""

    PROJECT = 'PROJECT'
    THING_NAME = 'THING_NAME'
    TEMPLATE = r'projects/(?P<project>\w+)/things/(?P<name>\w+)'

    def _callFUT(self, path, project, template):
        from gcloud._helpers import _name_from_project_path
        return _name_from_project_path(path, project, template)

    def test_w_invalid_path_length(self):
        # Too few path segments cannot match the template.
        PATH = 'projects/foo'
        with self.assertRaises(ValueError):
            self._callFUT(PATH, None, self.TEMPLATE)

    def test_w_invalid_path_segments(self):
        # Right length, wrong collection names.
        PATH = 'foo/%s/bar/%s' % (self.PROJECT, self.THING_NAME)
        with self.assertRaises(ValueError):
            self._callFUT(PATH, self.PROJECT, self.TEMPLATE)

    def test_w_mismatched_project(self):
        # The project embedded in the path must match the one supplied.
        PROJECT1 = 'PROJECT1'
        PROJECT2 = 'PROJECT2'
        PATH = 'projects/%s/things/%s' % (PROJECT1, self.THING_NAME)
        with self.assertRaises(ValueError):
            self._callFUT(PATH, PROJECT2, self.TEMPLATE)

    def test_w_valid_data_w_compiled_regex(self):
        import re
        # A pre-compiled template must be accepted as well as a string.
        template = re.compile(self.TEMPLATE)
        PATH = 'projects/%s/things/%s' % (self.PROJECT, self.THING_NAME)
        name = self._callFUT(PATH, self.PROJECT, template)
        self.assertEqual(name, self.THING_NAME)

    def test_w_project_passed_as_none(self):
        # Passing project=None skips the project-match check.
        PROJECT1 = 'PROJECT1'
        PATH = 'projects/%s/things/%s' % (PROJECT1, self.THING_NAME)
        # Fix: the original invoked _callFUT twice here and discarded the
        # first result; a single call is sufficient.
        name = self._callFUT(PATH, None, self.TEMPLATE)
        self.assertEqual(name, self.THING_NAME)
class _AppIdentity(object):
    """Stub standing in for App Engine's ``app_identity`` service."""

    def __init__(self, app_id):
        self.app_id = app_id

    def get_application_id(self):
        # Returns the canned id; mirrors the real service's API surface.
        return self.app_id
class _HTTPResponse(object):
    """Canned response object mimicking an ``httplib`` response."""

    def __init__(self, status, data):
        self.status = status
        self.data = data

    def read(self):
        # Return the whole canned payload, like reading a response body.
        return self.data
class _BaseHTTPConnection(object):
    """Fake HTTP connection that records calls instead of doing I/O."""

    # Unconfigured by default; code under test may set these.
    host = timeout = None

    def __init__(self):
        self._close_count = 0
        self._called_args = []
        self._called_kwargs = []

    def request(self, method, uri, **kwargs):
        # Record the request so tests can assert on it; no network traffic.
        self._called_args.append((method, uri))
        self._called_kwargs.append(kwargs)

    def close(self):
        # Count closes so tests can verify cleanup happened.
        self._close_count += 1
class _HTTPConnection(_BaseHTTPConnection):
    """Fake connection that answers with a canned status and body."""

    def __init__(self, status, project):
        super(_HTTPConnection, self).__init__()
        self.status = status
        self.project = project

    def getresponse(self):
        # The configured project string serves as the response body.
        return _HTTPResponse(self.status, self.project)
class _TimeoutHTTPConnection(_BaseHTTPConnection):
    """Fake connection whose response attempt always times out."""

    def getresponse(self):
        import socket
        # Simulate a network timeout on every call.
        raise socket.timeout('timed out')
| 31.603027 | 78 | 0.633882 |
cc729352cbcee7499b72167cb80659bf697e384e | 1,027 | py | Python | kubernetes/test/test_v1_controller_revision_list.py | kevingessner/python | 3f4d09d260cf0839fae8173852c69e0419188454 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_controller_revision_list.py | kevingessner/python | 3f4d09d260cf0839fae8173852c69e0419188454 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_controller_revision_list.py | kevingessner/python | 3f4d09d260cf0839fae8173852c69e0419188454 | [
"Apache-2.0"
] | 1 | 2018-07-19T16:37:20.000Z | 2018-07-19T16:37:20.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_controller_revision_list import V1ControllerRevisionList
class TestV1ControllerRevisionList(unittest.TestCase):
    """ V1ControllerRevisionList unit test stubs """

    def setUp(self):
        # No fixtures required for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1ControllerRevisionList(self):
        """
        Test V1ControllerRevisionList
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1_controller_revision_list.V1ControllerRevisionList()
        pass
if __name__ == '__main__':
unittest.main()
| 22.822222 | 105 | 0.727361 |
9b7ca8b0491df3b170b97e672cc888a7d287613f | 1,446 | py | Python | setup.py | bretth/auth0plus | a5e82b0cabe8a404530a3a444ab83ae352f59b66 | [
"0BSD"
] | 3 | 2016-09-15T14:57:23.000Z | 2021-03-21T08:16:21.000Z | setup.py | bretth/auth0plus | a5e82b0cabe8a404530a3a444ab83ae352f59b66 | [
"0BSD"
] | 1 | 2017-05-19T03:15:05.000Z | 2017-05-19T03:16:00.000Z | setup.py | bretth/auth0plus | a5e82b0cabe8a404530a3a444ab83ae352f59b66 | [
"0BSD"
] | 1 | 2020-10-15T06:43:35.000Z | 2020-10-15T06:43:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Prefer setuptools; fall back to distutils on minimal installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# The long description is assembled from the README plus the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
# Runtime dependencies.
requirements = [
    'requests',
    'combomethod'
]
test_requirements = [
]
setup(
    name='auth0plus',
    version='0.3.0',
    description="Unofficial enhancements to the Auth0-python package",
    long_description=readme + '\n\n' + history,
    author="Brett Haydon",
    author_email='brett@haydon.id.au',
    url='https://github.com/bretth/auth0plus',
    packages=[
        'auth0plus',
        'auth0plus.management'
    ],
    package_dir={'auth0plus':
                 'auth0plus'},
    include_package_data=True,
    install_requires=requirements,
    license="ISCL",
    zip_safe=False,
    keywords='auth0plus',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    test_suite='tests',
)
| 24.508475 | 70 | 0.614799 |
954f093bd6af5ded50be52c3219d99f27ec05ded | 53,050 | py | Python | lib/tool_shed/util/admin_util.py | lesperry/Metagenomics | a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6 | [
"CC-BY-3.0"
] | null | null | null | lib/tool_shed/util/admin_util.py | lesperry/Metagenomics | a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6 | [
"CC-BY-3.0"
] | 2 | 2020-08-19T18:14:59.000Z | 2020-08-20T01:19:12.000Z | lib/tool_shed/util/admin_util.py | lesperry/Metagenomics | a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6 | [
"CC-BY-3.0"
] | null | null | null | import logging
import time
from sqlalchemy import false, func
from galaxy import util, web
from galaxy.security.validate_user_input import validate_password
from galaxy.util import inflector
from galaxy.util.hash_util import new_secure_hash
from galaxy.web.form_builder import CheckboxField
from tool_shed.util.web_util import escape
# Module-level loggers; ``compliance_log`` is a dedicated channel
# (presumably for auditable events — confirm against logging config).
log = logging.getLogger(__name__)
compliance_log = logging.getLogger('COMPLIANCE')
class Admin:
    """Controller mixin providing admin actions for users, roles and groups.

    Subclasses are expected to override the grid and operation attributes
    below with concrete instances.
    """

    # Override these
    user_list_grid = None
    role_list_grid = None
    group_list_grid = None
    delete_operation = None
    undelete_operation = None
    purge_operation = None
@web.expose
@web.require_admin
def index(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
return trans.fill_template('/webapps/tool_shed/admin/index.mako',
message=message,
status=status)
@web.expose
@web.require_admin
def center(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
return trans.fill_template('/webapps/tool_shed/admin/center.mako',
message=message,
status=status)
@web.expose
@web.require_admin
def roles(self, trans, **kwargs):
if 'operation' in kwargs:
operation = kwargs['operation'].lower().replace('+', ' ')
if operation == "roles":
return self.role(trans, **kwargs)
if operation == "create":
return self.create_role(trans, **kwargs)
if operation == "delete":
return self.mark_role_deleted(trans, **kwargs)
if operation == "undelete":
return self.undelete_role(trans, **kwargs)
if operation == "purge":
return self.purge_role(trans, **kwargs)
if operation == "manage users and groups":
return self.manage_users_and_groups_for_role(trans, **kwargs)
if operation == "manage role associations":
# This is currently used only in the Tool Shed.
return self.manage_role_associations(trans, **kwargs)
if operation == "rename":
return self.rename_role(trans, **kwargs)
# Render the list view
return self.role_list_grid(trans, **kwargs)
    @web.expose
    @web.require_admin
    def create_role(self, trans, **kwd):
        """Display and process the role-creation form.

        On POST (``create_role_button``) validates name/description
        uniqueness, creates the role with its user/group associations
        (optionally plus a same-named group), then redirects to the
        roles grid; otherwise re-renders the form.
        """
        params = util.Params(kwd)
        message = util.restore_text(params.get('message', ''))
        status = params.get('status', 'done')
        name = util.restore_text(params.get('name', ''))
        description = util.restore_text(params.get('description', ''))
        in_users = util.listify(params.get('in_users', []))
        out_users = util.listify(params.get('out_users', []))
        in_groups = util.listify(params.get('in_groups', []))
        out_groups = util.listify(params.get('out_groups', []))
        create_group_for_role = params.get('create_group_for_role', '')
        create_group_for_role_checked = CheckboxField.is_checked(create_group_for_role)
        ok = True
        if params.get('create_role_button', False):
            if not name or not description:
                message = "Enter a valid name and a description."
                status = 'error'
                ok = False
            elif trans.sa_session.query(trans.app.model.Role).filter(trans.app.model.Role.table.c.name == name).first():
                message = "Role names must be unique and a role with that name already exists, so choose another name."
                status = 'error'
                ok = False
            else:
                # Create the role
                role = trans.app.model.Role(name=name, description=description, type=trans.app.model.Role.types.ADMIN)
                trans.sa_session.add(role)
                # Create the UserRoleAssociations
                for user in [trans.sa_session.query(trans.app.model.User).get(x) for x in in_users]:
                    ura = trans.app.model.UserRoleAssociation(user, role)
                    trans.sa_session.add(ura)
                # Create the GroupRoleAssociations
                for group in [trans.sa_session.query(trans.app.model.Group).get(x) for x in in_groups]:
                    gra = trans.app.model.GroupRoleAssociation(group, role)
                    trans.sa_session.add(gra)
                if create_group_for_role_checked:
                    # Create the group
                    group = trans.app.model.Group(name=name)
                    trans.sa_session.add(group)
                    # Associate the group with the role
                    # NOTE(review): uses trans.model here vs trans.app.model
                    # elsewhere — presumably an alias; confirm.
                    gra = trans.model.GroupRoleAssociation(group, role)
                    trans.sa_session.add(gra)
                    num_in_groups = len(in_groups) + 1
                else:
                    num_in_groups = len(in_groups)
                trans.sa_session.flush()
                message = "Role '%s' has been created with %d associated users and %d associated groups. " \
                    % (role.name, len(in_users), num_in_groups)
                if create_group_for_role_checked:
                    message += 'One of the groups associated with this role is the newly created group with the same name.'
                trans.response.send_redirect(web.url_for(controller='admin',
                                                         action='roles',
                                                         message=util.sanitize_text(message),
                                                         status='done'))
        if ok:
            # Populate the "available" selectors with all live users/groups.
            for user in trans.sa_session.query(trans.app.model.User) \
                                        .filter(trans.app.model.User.table.c.deleted == false()) \
                                        .order_by(trans.app.model.User.table.c.email):
                out_users.append((user.id, user.email))
            for group in trans.sa_session.query(trans.app.model.Group) \
                                         .filter(trans.app.model.Group.table.c.deleted == false()) \
                                         .order_by(trans.app.model.Group.table.c.name):
                out_groups.append((group.id, group.name))
        return trans.fill_template('/webapps/tool_shed/admin/dataset_security/role/role_create.mako',
                                   name=name,
                                   description=description,
                                   in_users=in_users,
                                   out_users=out_users,
                                   in_groups=in_groups,
                                   out_groups=out_groups,
                                   create_group_for_role_checked=create_group_for_role_checked,
                                   message=message,
                                   status=status)
@web.expose
@web.require_admin
def rename_role(self, trans, **kwd):
params = util.Params(kwd)
message = util.restore_text(params.get('message', ''))
status = params.get('status', 'done')
id = params.get('id', None)
if not id:
message = "No role ids received for renaming"
trans.response.send_redirect(web.url_for(controller='admin',
action='roles',
message=message,
status='error'))
role = get_role(trans, id)
if params.get('rename_role_button', False):
old_name = role.name
new_name = util.restore_text(params.name)
new_description = util.restore_text(params.description)
if not new_name:
message = 'Enter a valid name'
status = 'error'
else:
existing_role = trans.sa_session.query(trans.app.model.Role).filter(trans.app.model.Role.table.c.name == new_name).first()
if existing_role and existing_role.id != role.id:
message = 'A role with that name already exists'
status = 'error'
else:
if not (role.name == new_name and role.description == new_description):
role.name = new_name
role.description = new_description
trans.sa_session.add(role)
trans.sa_session.flush()
message = "Role '{}' has been renamed to '{}'".format(old_name, new_name)
return trans.response.send_redirect(web.url_for(controller='admin',
action='roles',
message=util.sanitize_text(message),
status='done'))
return trans.fill_template('/webapps/tool_shed/admin/dataset_security/role/role_rename.mako',
role=role,
message=message,
status=status)
    @web.expose
    @web.require_admin
    def manage_users_and_groups_for_role(self, trans, **kwd):
        """Display and process the user/group membership form for a role.

        On POST (``role_members_edit_button``) resets the role's user and
        group associations (cleaning up now-stale default permissions on
        the Galaxy webapp) and redirects to the roles grid.
        """
        params = util.Params(kwd)
        message = util.restore_text(params.get('message', ''))
        status = params.get('status', 'done')
        id = params.get('id', None)
        if not id:
            message = "No role ids received for managing users and groups"
            # NOTE(review): no ``return`` here — execution continues and
            # get_role() is called with id=None; likely a latent bug.
            trans.response.send_redirect(web.url_for(controller='admin',
                                                     action='roles',
                                                     message=message,
                                                     status='error'))
        role = get_role(trans, id)
        if params.get('role_members_edit_button', False):
            in_users = [trans.sa_session.query(trans.app.model.User).get(x) for x in util.listify(params.in_users)]
            if trans.webapp.name == 'galaxy':
                for ura in role.users:
                    user = trans.sa_session.query(trans.app.model.User).get(ura.user_id)
                    if user not in in_users:
                        # Delete DefaultUserPermissions for previously associated users that have been removed from the role
                        for dup in user.default_permissions:
                            if role == dup.role:
                                trans.sa_session.delete(dup)
                        # Delete DefaultHistoryPermissions for previously associated users that have been removed from the role
                        for history in user.histories:
                            for dhp in history.default_permissions:
                                if role == dhp.role:
                                    trans.sa_session.delete(dhp)
                trans.sa_session.flush()
            in_groups = [trans.sa_session.query(trans.app.model.Group).get(x) for x in util.listify(params.in_groups)]
            trans.app.security_agent.set_entity_role_associations(roles=[role], users=in_users, groups=in_groups)
            trans.sa_session.refresh(role)
            message = "Role '%s' has been updated with %d associated users and %d associated groups" % (role.name, len(in_users), len(in_groups))
            trans.response.send_redirect(web.url_for(controller='admin',
                                                     action='roles',
                                                     message=util.sanitize_text(message),
                                                     status=status))
        # Build the in/out selector lists for the form.
        in_users = []
        out_users = []
        in_groups = []
        out_groups = []
        for user in trans.sa_session.query(trans.app.model.User) \
                                    .filter(trans.app.model.User.table.c.deleted == false()) \
                                    .order_by(trans.app.model.User.table.c.email):
            if user in [x.user for x in role.users]:
                in_users.append((user.id, user.email))
            else:
                out_users.append((user.id, user.email))
        for group in trans.sa_session.query(trans.app.model.Group) \
                                     .filter(trans.app.model.Group.table.c.deleted == false()) \
                                     .order_by(trans.app.model.Group.table.c.name):
            if group in [x.group for x in role.groups]:
                in_groups.append((group.id, group.name))
            else:
                out_groups.append((group.id, group.name))
        library_dataset_actions = {}
        if trans.webapp.name == 'galaxy' and len(role.dataset_actions) < 25:
            # Build a list of tuples that are LibraryDatasetDatasetAssociationss followed by a list of actions
            # whose DatasetPermissions is associated with the Role
            # [ ( LibraryDatasetDatasetAssociation [ action, action ] ) ]
            for dp in role.dataset_actions:
                for ldda in trans.sa_session.query(trans.app.model.LibraryDatasetDatasetAssociation) \
                                            .filter(trans.app.model.LibraryDatasetDatasetAssociation.dataset_id == dp.dataset_id):
                    root_found = False
                    folder_path = ''
                    folder = ldda.library_dataset.folder
                    # Walk up to the library root, accumulating the path.
                    while not root_found:
                        folder_path = '{} / {}'.format(folder.name, folder_path)
                        if not folder.parent:
                            root_found = True
                        else:
                            folder = folder.parent
                    folder_path = '{} {}'.format(folder_path, ldda.name)
                    library = trans.sa_session.query(trans.app.model.Library) \
                                              .filter(trans.app.model.Library.table.c.root_folder_id == folder.id) \
                                              .first()
                    if library not in library_dataset_actions:
                        library_dataset_actions[library] = {}
                    try:
                        library_dataset_actions[library][folder_path].append(dp.action)
                    except Exception:
                        library_dataset_actions[library][folder_path] = [dp.action]
        else:
            message = "Not showing associated datasets, there are too many."
            status = 'info'
        return trans.fill_template('/webapps/tool_shed/admin/dataset_security/role/role.mako',
                                   role=role,
                                   in_users=in_users,
                                   out_users=out_users,
                                   in_groups=in_groups,
                                   out_groups=out_groups,
                                   library_dataset_actions=library_dataset_actions,
                                   message=message,
                                   status=status)
@web.expose
@web.require_admin
def mark_role_deleted(self, trans, **kwd):
id = kwd.get('id', None)
if not id:
message = "No role ids received for deleting"
trans.response.send_redirect(web.url_for(controller='admin',
action='roles',
message=message,
status='error'))
ids = util.listify(id)
message = "Deleted %d roles: " % len(ids)
for role_id in ids:
role = get_role(trans, role_id)
role.deleted = True
trans.sa_session.add(role)
trans.sa_session.flush()
message += " %s " % role.name
trans.response.send_redirect(web.url_for(controller='admin',
action='roles',
message=util.sanitize_text(message),
status='done'))
@web.expose
@web.require_admin
def undelete_role(self, trans, **kwd):
id = kwd.get('id', None)
if not id:
message = "No role ids received for undeleting"
trans.response.send_redirect(web.url_for(controller='admin',
action='roles',
message=message,
status='error'))
ids = util.listify(id)
count = 0
undeleted_roles = ""
for role_id in ids:
role = get_role(trans, role_id)
if not role.deleted:
message = "Role '%s' has not been deleted, so it cannot be undeleted." % role.name
trans.response.send_redirect(web.url_for(controller='admin',
action='roles',
message=util.sanitize_text(message),
status='error'))
role.deleted = False
trans.sa_session.add(role)
trans.sa_session.flush()
count += 1
undeleted_roles += " %s" % role.name
message = "Undeleted %d roles: %s" % (count, undeleted_roles)
trans.response.send_redirect(web.url_for(controller='admin',
action='roles',
message=util.sanitize_text(message),
status='done'))
@web.expose
@web.require_admin
def purge_role(self, trans, **kwd):
# This method should only be called for a Role that has previously been deleted.
# Purging a deleted Role deletes all of the following from the database:
# - UserRoleAssociations where role_id == Role.id
# - DefaultUserPermissions where role_id == Role.id
# - DefaultHistoryPermissions where role_id == Role.id
# - GroupRoleAssociations where role_id == Role.id
# - DatasetPermissionss where role_id == Role.id
id = kwd.get('id', None)
if not id:
message = "No role ids received for purging"
trans.response.send_redirect(web.url_for(controller='admin',
action='roles',
message=util.sanitize_text(message),
status='error'))
ids = util.listify(id)
message = "Purged %d roles: " % len(ids)
for role_id in ids:
role = get_role(trans, role_id)
if not role.deleted:
message = "Role '%s' has not been deleted, so it cannot be purged." % role.name
trans.response.send_redirect(web.url_for(controller='admin',
action='roles',
message=util.sanitize_text(message),
status='error'))
# Delete UserRoleAssociations
for ura in role.users:
user = trans.sa_session.query(trans.app.model.User).get(ura.user_id)
# Delete DefaultUserPermissions for associated users
for dup in user.default_permissions:
if role == dup.role:
trans.sa_session.delete(dup)
# Delete DefaultHistoryPermissions for associated users
for history in user.histories:
for dhp in history.default_permissions:
if role == dhp.role:
trans.sa_session.delete(dhp)
trans.sa_session.delete(ura)
# Delete GroupRoleAssociations
for gra in role.groups:
trans.sa_session.delete(gra)
# Delete DatasetPermissionss
for dp in role.dataset_actions:
trans.sa_session.delete(dp)
trans.sa_session.flush()
message += " %s " % role.name
trans.response.send_redirect(web.url_for(controller='admin',
action='roles',
message=util.sanitize_text(message),
status='done'))
@web.expose
@web.require_admin
def groups(self, trans, **kwargs):
if 'operation' in kwargs:
operation = kwargs['operation'].lower().replace('+', ' ')
if operation == "groups":
return self.group(trans, **kwargs)
if operation == "create":
return self.create_group(trans, **kwargs)
if operation == "delete":
return self.mark_group_deleted(trans, **kwargs)
if operation == "undelete":
return self.undelete_group(trans, **kwargs)
if operation == "purge":
return self.purge_group(trans, **kwargs)
if operation == "manage users and roles":
return self.manage_users_and_roles_for_group(trans, **kwargs)
if operation == "rename":
return self.rename_group(trans, **kwargs)
# Render the list view
return self.group_list_grid(trans, **kwargs)
@web.expose
@web.require_admin
def rename_group(self, trans, **kwd):
params = util.Params(kwd)
message = util.restore_text(params.get('message', ''))
status = params.get('status', 'done')
id = params.get('id', None)
if not id:
message = "No group ids received for renaming"
trans.response.send_redirect(web.url_for(controller='admin',
action='groups',
message=message,
status='error'))
group = get_group(trans, id)
if params.get('rename_group_button', False):
old_name = group.name
new_name = util.restore_text(params.name)
if not new_name:
message = 'Enter a valid name'
status = 'error'
else:
existing_group = trans.sa_session.query(trans.app.model.Group).filter(trans.app.model.Group.table.c.name == new_name).first()
if existing_group and existing_group.id != group.id:
message = 'A group with that name already exists'
status = 'error'
else:
if group.name != new_name:
group.name = new_name
trans.sa_session.add(group)
trans.sa_session.flush()
message = "Group '{}' has been renamed to '{}'".format(old_name, new_name)
return trans.response.send_redirect(web.url_for(controller='admin',
action='groups',
message=util.sanitize_text(message),
status='done'))
return trans.fill_template('/webapps/tool_shed/admin/dataset_security/group/group_rename.mako',
group=group,
message=message,
status=status)
    @web.expose
    @web.require_admin
    def manage_users_and_roles_for_group(self, trans, **kwd):
        """Display and process the role/user membership form for a group.

        On POST (``group_roles_users_edit_button``) resets the group's role
        and user associations and redirects to the groups grid.
        """
        params = util.Params(kwd)
        message = util.restore_text(params.get('message', ''))
        status = params.get('status', 'done')
        group = get_group(trans, params.id)
        if params.get('group_roles_users_edit_button', False):
            in_roles = [trans.sa_session.query(trans.app.model.Role).get(x) for x in util.listify(params.in_roles)]
            in_users = [trans.sa_session.query(trans.app.model.User).get(x) for x in util.listify(params.in_users)]
            trans.app.security_agent.set_entity_group_associations(groups=[group], roles=in_roles, users=in_users)
            trans.sa_session.refresh(group)
            message += "Group '%s' has been updated with %d associated roles and %d associated users" % (group.name, len(in_roles), len(in_users))
            trans.response.send_redirect(web.url_for(controller='admin',
                                                     action='groups',
                                                     message=util.sanitize_text(message),
                                                     status=status))
        # Build the in/out selector lists for the form.
        in_roles = []
        out_roles = []
        in_users = []
        out_users = []
        for role in trans.sa_session.query(trans.app.model.Role) \
                                    .filter(trans.app.model.Role.table.c.deleted == false()) \
                                    .order_by(trans.app.model.Role.table.c.name):
            if role in [x.role for x in group.roles]:
                in_roles.append((role.id, role.name))
            else:
                out_roles.append((role.id, role.name))
        for user in trans.sa_session.query(trans.app.model.User) \
                                    .filter(trans.app.model.User.table.c.deleted == false()) \
                                    .order_by(trans.app.model.User.table.c.email):
            if user in [x.user for x in group.users]:
                in_users.append((user.id, user.email))
            else:
                out_users.append((user.id, user.email))
        message += 'Group %s is currently associated with %d roles and %d users' % (group.name, len(in_roles), len(in_users))
        return trans.fill_template('/webapps/tool_shed/admin/dataset_security/group/group.mako',
                                   group=group,
                                   in_roles=in_roles,
                                   out_roles=out_roles,
                                   in_users=in_users,
                                   out_users=out_users,
                                   message=message,
                                   status=status)
    @web.expose
    @web.require_admin
    def create_group(self, trans, **kwd):
        """Display and process the group-creation form.

        On POST (``create_group_button``) validates name uniqueness,
        creates the group with its user/role associations (optionally plus
        a same-named role), then redirects to the groups grid; otherwise
        re-renders the form.
        """
        params = util.Params(kwd)
        message = util.restore_text(params.get('message', ''))
        status = params.get('status', 'done')
        name = util.restore_text(params.get('name', ''))
        in_users = util.listify(params.get('in_users', []))
        out_users = util.listify(params.get('out_users', []))
        in_roles = util.listify(params.get('in_roles', []))
        out_roles = util.listify(params.get('out_roles', []))
        create_role_for_group = params.get('create_role_for_group', '')
        create_role_for_group_checked = CheckboxField.is_checked(create_role_for_group)
        ok = True
        if params.get('create_group_button', False):
            if not name:
                message = "Enter a valid name."
                status = 'error'
                ok = False
            elif trans.sa_session.query(trans.app.model.Group).filter(trans.app.model.Group.table.c.name == name).first():
                message = "Group names must be unique and a group with that name already exists, so choose another name."
                status = 'error'
                ok = False
            else:
                # Create the group
                group = trans.app.model.Group(name=name)
                trans.sa_session.add(group)
                trans.sa_session.flush()
                # Create the UserRoleAssociations
                for user in [trans.sa_session.query(trans.app.model.User).get(x) for x in in_users]:
                    uga = trans.app.model.UserGroupAssociation(user, group)
                    trans.sa_session.add(uga)
                # Create the GroupRoleAssociations
                for role in [trans.sa_session.query(trans.app.model.Role).get(x) for x in in_roles]:
                    gra = trans.app.model.GroupRoleAssociation(group, role)
                    trans.sa_session.add(gra)
                if create_role_for_group_checked:
                    # Create the role
                    role = trans.app.model.Role(name=name, description='Role for group %s' % name)
                    trans.sa_session.add(role)
                    # Associate the role with the group
                    # NOTE(review): uses trans.model here vs trans.app.model
                    # elsewhere — presumably an alias; confirm.
                    gra = trans.model.GroupRoleAssociation(group, role)
                    trans.sa_session.add(gra)
                    num_in_roles = len(in_roles) + 1
                else:
                    num_in_roles = len(in_roles)
                trans.sa_session.flush()
                message = "Group '%s' has been created with %d associated users and %d associated roles. " \
                    % (group.name, len(in_users), num_in_roles)
                if create_role_for_group_checked:
                    message += 'One of the roles associated with this group is the newly created role with the same name.'
                trans.response.send_redirect(web.url_for(controller='admin',
                                                         action='groups',
                                                         message=util.sanitize_text(message),
                                                         status='done'))
        if ok:
            # Populate the "available" selectors with all live users/roles.
            for user in trans.sa_session.query(trans.app.model.User) \
                                        .filter(trans.app.model.User.table.c.deleted == false()) \
                                        .order_by(trans.app.model.User.table.c.email):
                out_users.append((user.id, user.email))
            for role in trans.sa_session.query(trans.app.model.Role) \
                                        .filter(trans.app.model.Role.table.c.deleted == false()) \
                                        .order_by(trans.app.model.Role.table.c.name):
                out_roles.append((role.id, role.name))
        return trans.fill_template('/webapps/tool_shed/admin/dataset_security/group/group_create.mako',
                                   name=name,
                                   in_users=in_users,
                                   out_users=out_users,
                                   in_roles=in_roles,
                                   out_roles=out_roles,
                                   create_role_for_group_checked=create_role_for_group_checked,
                                   message=message,
                                   status=status)
@web.expose
@web.require_admin
def mark_group_deleted(self, trans, **kwd):
params = util.Params(kwd)
id = params.get('id', None)
if not id:
message = "No group ids received for marking deleted"
trans.response.send_redirect(web.url_for(controller='admin',
action='groups',
message=message,
status='error'))
ids = util.listify(id)
message = "Deleted %d groups: " % len(ids)
for group_id in ids:
group = get_group(trans, group_id)
group.deleted = True
trans.sa_session.add(group)
trans.sa_session.flush()
message += " %s " % group.name
trans.response.send_redirect(web.url_for(controller='admin',
action='groups',
message=util.sanitize_text(message),
status='done'))
@web.expose
@web.require_admin
def undelete_group(self, trans, **kwd):
id = kwd.get('id', None)
if not id:
message = "No group ids received for undeleting"
trans.response.send_redirect(web.url_for(controller='admin',
action='groups',
message=message,
status='error'))
ids = util.listify(id)
count = 0
undeleted_groups = ""
for group_id in ids:
group = get_group(trans, group_id)
if not group.deleted:
message = "Group '%s' has not been deleted, so it cannot be undeleted." % group.name
trans.response.send_redirect(web.url_for(controller='admin',
action='groups',
message=util.sanitize_text(message),
status='error'))
group.deleted = False
trans.sa_session.add(group)
trans.sa_session.flush()
count += 1
undeleted_groups += " %s" % group.name
message = "Undeleted %d groups: %s" % (count, undeleted_groups)
trans.response.send_redirect(web.url_for(controller='admin',
action='groups',
message=util.sanitize_text(message),
status='done'))
@web.expose
@web.require_admin
def purge_group(self, trans, **kwd):
# This method should only be called for a Group that has previously been deleted.
# Purging a deleted Group simply deletes all UserGroupAssociations and GroupRoleAssociations.
id = kwd.get('id', None)
if not id:
message = "No group ids received for purging"
trans.response.send_redirect(web.url_for(controller='admin',
action='groups',
message=util.sanitize_text(message),
status='error'))
ids = util.listify(id)
message = "Purged %d groups: " % len(ids)
for group_id in ids:
group = get_group(trans, group_id)
if not group.deleted:
# We should never reach here, but just in case there is a bug somewhere...
message = "Group '%s' has not been deleted, so it cannot be purged." % group.name
trans.response.send_redirect(web.url_for(controller='admin',
action='groups',
message=util.sanitize_text(message),
status='error'))
# Delete UserGroupAssociations
for uga in group.users:
trans.sa_session.delete(uga)
# Delete GroupRoleAssociations
for gra in group.roles:
trans.sa_session.delete(gra)
trans.sa_session.flush()
message += " %s " % group.name
trans.response.send_redirect(web.url_for(controller='admin',
action='groups',
message=util.sanitize_text(message),
status='done'))
@web.expose
@web.require_admin
def create_new_user(self, trans, **kwd):
return trans.response.send_redirect(web.url_for(controller='user',
action='create',
cntrller='admin'))
@web.expose
@web.require_admin
def reset_user_password(self, trans, **kwd):
user_id = kwd.get('id', None)
if not user_id:
message = "No users received for resetting passwords."
trans.response.send_redirect(web.url_for(controller='admin',
action='users',
message=message,
status='error'))
user_ids = util.listify(user_id)
if 'reset_user_password_button' in kwd:
message = ''
status = ''
for user_id in user_ids:
user = get_user(trans, user_id)
password = kwd.get('password', None)
confirm = kwd.get('confirm', None)
message = validate_password(trans, password, confirm)
if message:
status = 'error'
break
else:
user.set_password_cleartext(password)
trans.sa_session.add(user)
trans.sa_session.flush()
if not message and not status:
message = "Passwords reset for %d %s." % (len(user_ids), inflector.cond_plural(len(user_ids), 'user'))
status = 'done'
trans.response.send_redirect(web.url_for(controller='admin',
action='users',
message=util.sanitize_text(message),
status=status))
users = [get_user(trans, user_id) for user_id in user_ids]
if len(user_ids) > 1:
user_id = ','.join(user_ids)
return trans.fill_template('/webapps/tool_shed/admin/user/reset_password.mako',
id=user_id,
users=users,
password='',
confirm='')
@web.expose
@web.require_admin
def mark_user_deleted(self, trans, **kwd):
id = kwd.get('id', None)
if not id:
message = "No user ids received for deleting"
trans.response.send_redirect(web.url_for(controller='admin',
action='users',
message=message,
status='error'))
ids = util.listify(id)
message = "Deleted %d users: " % len(ids)
for user_id in ids:
user = get_user(trans, user_id)
user.deleted = True
compliance_log.info('delete-user-event: %s' % user_id)
# See lib/galaxy/webapps/tool_shed/controllers/admin.py
pseudorandom_value = str(int(time.time()))
email_hash = new_secure_hash(user.email + pseudorandom_value)
uname_hash = new_secure_hash(user.username + pseudorandom_value)
for role in user.all_roles():
print(role, self.app.config.redact_username_during_deletion, self.app.config.redact_email_during_deletion)
if self.app.config.redact_username_during_deletion:
role.name = role.name.replace(user.username, uname_hash)
role.description = role.description.replace(user.username, uname_hash)
if self.app.config.redact_email_during_deletion:
role.name = role.name.replace(user.email, email_hash)
role.description = role.description.replace(user.email, email_hash)
if self.app.config.redact_email_during_deletion:
user.email = email_hash
if self.app.config.redact_username_during_deletion:
user.username = uname_hash
trans.sa_session.add(user)
trans.sa_session.flush()
message += " %s " % user.email
trans.response.send_redirect(web.url_for(controller='admin',
action='users',
message=util.sanitize_text(message),
status='done'))
@web.expose
@web.require_admin
def undelete_user(self, trans, **kwd):
id = kwd.get('id', None)
if not id:
message = "No user ids received for undeleting"
trans.response.send_redirect(web.url_for(controller='admin',
action='users',
message=message,
status='error'))
ids = util.listify(id)
count = 0
undeleted_users = ""
for user_id in ids:
user = get_user(trans, user_id)
if not user.deleted:
message = "User '%s' has not been deleted, so it cannot be undeleted." % user.email
trans.response.send_redirect(web.url_for(controller='admin',
action='users',
message=util.sanitize_text(message),
status='error'))
user.deleted = False
trans.sa_session.add(user)
trans.sa_session.flush()
count += 1
undeleted_users += " %s" % user.email
message = "Undeleted %d users: %s" % (count, undeleted_users)
trans.response.send_redirect(web.url_for(controller='admin',
action='users',
message=util.sanitize_text(message),
status='done'))
@web.expose
@web.require_admin
def purge_user(self, trans, **kwd):
# This method should only be called for a User that has previously been deleted.
# We keep the User in the database ( marked as purged ), and stuff associated
# with the user's private role in case we want the ability to unpurge the user
# some time in the future.
# Purging a deleted User deletes all of the following:
# - History where user_id = User.id
# - HistoryDatasetAssociation where history_id = History.id
# - Dataset where HistoryDatasetAssociation.dataset_id = Dataset.id
# - UserGroupAssociation where user_id == User.id
# - UserRoleAssociation where user_id == User.id EXCEPT FOR THE PRIVATE ROLE
# - UserAddress where user_id == User.id
# Purging Histories and Datasets must be handled via the cleanup_datasets.py script
id = kwd.get('id', None)
if not id:
message = "No user ids received for purging"
trans.response.send_redirect(web.url_for(controller='admin',
action='users',
message=util.sanitize_text(message),
status='error'))
ids = util.listify(id)
message = "Purged %d users: " % len(ids)
for user_id in ids:
user = get_user(trans, user_id)
if not user.deleted:
# We should never reach here, but just in case there is a bug somewhere...
message = "User '%s' has not been deleted, so it cannot be purged." % user.email
trans.response.send_redirect(web.url_for(controller='admin',
action='users',
message=util.sanitize_text(message),
status='error'))
private_role = trans.app.security_agent.get_private_user_role(user)
# Delete History
for h in user.active_histories:
trans.sa_session.refresh(h)
for hda in h.active_datasets:
# Delete HistoryDatasetAssociation
d = trans.sa_session.query(trans.app.model.Dataset).get(hda.dataset_id)
# Delete Dataset
if not d.deleted:
d.deleted = True
trans.sa_session.add(d)
hda.deleted = True
trans.sa_session.add(hda)
h.deleted = True
trans.sa_session.add(h)
# Delete UserGroupAssociations
for uga in user.groups:
trans.sa_session.delete(uga)
# Delete UserRoleAssociations EXCEPT FOR THE PRIVATE ROLE
for ura in user.roles:
if ura.role_id != private_role.id:
trans.sa_session.delete(ura)
# Delete UserAddresses
for address in user.addresses:
trans.sa_session.delete(address)
# Purge the user
user.purged = True
trans.sa_session.add(user)
trans.sa_session.flush()
message += "%s " % user.email
trans.response.send_redirect(web.url_for(controller='admin',
action='users',
message=util.sanitize_text(message),
status='done'))
@web.expose
@web.require_admin
def users(self, trans, **kwd):
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation == "roles":
return self.user(trans, **kwd)
elif operation == "reset password":
return self.reset_user_password(trans, **kwd)
elif operation == "delete":
return self.mark_user_deleted(trans, **kwd)
elif operation == "undelete":
return self.undelete_user(trans, **kwd)
elif operation == "purge":
return self.purge_user(trans, **kwd)
elif operation == "create":
return self.create_new_user(trans, **kwd)
elif operation == "manage roles and groups":
return self.manage_roles_and_groups_for_user(trans, **kwd)
if trans.app.config.allow_user_deletion:
if self.delete_operation not in self.user_list_grid.operations:
self.user_list_grid.operations.append(self.delete_operation)
if self.undelete_operation not in self.user_list_grid.operations:
self.user_list_grid.operations.append(self.undelete_operation)
if self.purge_operation not in self.user_list_grid.operations:
self.user_list_grid.operations.append(self.purge_operation)
# Render the list view
return self.user_list_grid(trans, **kwd)
@web.expose
@web.require_admin
def name_autocomplete_data(self, trans, q=None, limit=None, timestamp=None):
"""Return autocomplete data for user emails"""
ac_data = ""
for user in trans.sa_session.query(trans.app.model.User).filter_by(deleted=False).filter(func.lower(trans.app.model.User.email).like(q.lower() + "%")):
ac_data = ac_data + user.email + "\n"
return ac_data
    @web.expose
    @web.require_admin
    def manage_roles_and_groups_for_user(self, trans, **kwd):
        """Render and process the role/group association form for one user.

        On form submission ('user_roles_groups_edit_button'), applies the
        selected in_roles/in_groups to the user and redirects back to the
        users grid; otherwise renders the edit form with the user's current
        in/out role and group lists.
        """
        user_id = kwd.get('id', None)
        message = ''
        status = ''
        if not user_id:
            message += "Invalid user id (%s) received" % str(user_id)
            # NOTE(review): no ``return`` here -- execution continues with
            # user_id = None after the redirect; confirm send_redirect aborts.
            trans.response.send_redirect(web.url_for(controller='admin',
                                                     action='users',
                                                     message=util.sanitize_text(message),
                                                     status='error'))
        user = get_user(trans, user_id)
        private_role = trans.app.security_agent.get_private_user_role(user)
        if kwd.get('user_roles_groups_edit_button', False):
            # Make sure the user is not dis-associating himself from his private role
            out_roles = kwd.get('out_roles', [])
            if out_roles:
                out_roles = [trans.sa_session.query(trans.app.model.Role).get(x) for x in util.listify(out_roles)]
            if private_role in out_roles:
                message += "You cannot eliminate a user's private role association. "
                status = 'error'
            in_roles = kwd.get('in_roles', [])
            if in_roles:
                in_roles = [trans.sa_session.query(trans.app.model.Role).get(x) for x in util.listify(in_roles)]
            out_groups = kwd.get('out_groups', [])
            if out_groups:
                out_groups = [trans.sa_session.query(trans.app.model.Group).get(x) for x in util.listify(out_groups)]
            in_groups = kwd.get('in_groups', [])
            if in_groups:
                in_groups = [trans.sa_session.query(trans.app.model.Group).get(x) for x in util.listify(in_groups)]
            # Only apply changes when at least one role remains selected
            # (the private role is always expected to be in in_roles).
            if in_roles:
                trans.app.security_agent.set_entity_user_associations(users=[user], roles=in_roles, groups=in_groups)
                trans.sa_session.refresh(user)
                message += "User '%s' has been updated with %d associated roles and %d associated groups (private roles are not displayed)" % \
                    (user.email, len(in_roles), len(in_groups))
                trans.response.send_redirect(web.url_for(controller='admin',
                                                         action='users',
                                                         message=util.sanitize_text(message),
                                                         status='done'))
        # Build the form-field lists from the current database state.
        in_roles = []
        out_roles = []
        in_groups = []
        out_groups = []
        for role in trans.sa_session.query(trans.app.model.Role).filter(trans.app.model.Role.table.c.deleted == false()) \
                .order_by(trans.app.model.Role.table.c.name):
            if role in [x.role for x in user.roles]:
                in_roles.append((role.id, role.name))
            elif role.type != trans.app.model.Role.types.PRIVATE:
                # There is a 1 to 1 mapping between a user and a PRIVATE role, so private roles should
                # not be listed in the roles form fields, except for the currently selected user's private
                # role, which should always be in in_roles.  The check above is added as an additional
                # precaution, since for a period of time we were including private roles in the form fields.
                out_roles.append((role.id, role.name))
        for group in trans.sa_session.query(trans.app.model.Group).filter(trans.app.model.Group.table.c.deleted == false()) \
                .order_by(trans.app.model.Group.table.c.name):
            if group in [x.group for x in user.groups]:
                in_groups.append((group.id, group.name))
            else:
                out_groups.append((group.id, group.name))
        message += "User '%s' is currently associated with %d roles and is a member of %d groups" % \
            (user.email, len(in_roles), len(in_groups))
        if not status:
            status = 'done'
        return trans.fill_template('/webapps/tool_shed/admin/user/user.mako',
                                   user=user,
                                   in_roles=in_roles,
                                   out_roles=out_roles,
                                   in_groups=in_groups,
                                   out_groups=out_groups,
                                   message=message,
                                   status=status)
# ---- Utility methods -------------------------------------------------------
def get_user(trans, user_id):
    """Look up a User by its encoded id, or render an error message."""
    decoded_id = trans.security.decode_id(user_id)
    user = trans.sa_session.query(trans.model.User).get(decoded_id)
    if user:
        return user
    # Error message reports the encoded id, as submitted by the caller.
    return trans.show_error_message("User not found for id (%s)" % str(user_id))
def get_role(trans, id):
    """Look up a Role by its encoded id, or render an error message."""
    # Load role from database
    decoded_id = trans.security.decode_id(id)
    role = trans.sa_session.query(trans.model.Role).get(decoded_id)
    if role:
        return role
    # Error message reports the decoded id (matches historical behavior).
    return trans.show_error_message("Role not found for id (%s)" % str(decoded_id))
def get_group(trans, id):
    """Look up a Group by its encoded id, or render an error message."""
    # Load group from database
    decoded_id = trans.security.decode_id(id)
    group = trans.sa_session.query(trans.model.Group).get(decoded_id)
    if group:
        return group
    # Error message reports the decoded id (matches historical behavior).
    return trans.show_error_message("Group not found for id (%s)" % str(decoded_id))
| 53.156313 | 159 | 0.508897 |
cd48ef126e05dffc36f9246a668539147c19f7d2 | 4,364 | py | Python | iminuit/_minuit_methods.py | rogovsky/iminuit | 9bffe21eafe3160ad43140570deeee1ebfede9d3 | [
"MIT"
] | null | null | null | iminuit/_minuit_methods.py | rogovsky/iminuit | 9bffe21eafe3160ad43140570deeee1ebfede9d3 | [
"MIT"
] | null | null | null | iminuit/_minuit_methods.py | rogovsky/iminuit | 9bffe21eafe3160ad43140570deeee1ebfede9d3 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
from warnings import warn
from iminuit.iminuit_warnings import InitialParamWarning
from iminuit import util as mutil
import numpy as np
def pedantic(self, parameters, kwds, errordef):
def w(msg):
warn(msg, InitialParamWarning, stacklevel=3)
for vn in parameters:
if vn not in kwds:
w("Parameter %s does not have initial value. Assume 0." % vn)
if "error_" + vn not in kwds and "fix_" + mutil.param_name(vn) not in kwds:
w(
"Parameter %s is floating but does not have initial step size. Assume 1."
% vn
)
for vlim in mutil.extract_limit(kwds):
if mutil.param_name(vlim) not in parameters:
w(
"%s is given. But there is no parameter %s. Ignore."
% (vlim, mutil.param_name(vlim))
)
for vfix in mutil.extract_fix(kwds):
if mutil.param_name(vfix) not in parameters:
w(
"%s is given. But there is no parameter %s. Ignore."
% (vfix, mutil.param_name(vfix))
)
for verr in mutil.extract_error(kwds):
if mutil.param_name(verr) not in parameters:
w(
"%s float. But there is no parameter %s. Ignore."
% (verr, mutil.param_name(verr))
)
if errordef is None:
w("errordef is not given. Default to 1.")
def profile(self, vname, bins, bound, args, subtract_min):
    """Scan the cost function along one parameter.

    Evaluates self.fcn at `bins` equally spaced values of `vname` between
    bound[0] and bound[1], holding the other arguments fixed.  Returns
    (scan values, FCN values); if subtract_min, self.fval is subtracted
    from the FCN values.
    """
    pos = self.var2pos[vname]
    scan = np.linspace(bound[0], bound[1], bins, dtype=np.double)
    fcn_values = np.empty(bins, dtype=np.double)
    base_args = list(self.args if args is None else args)
    if self.use_array_call:
        # The cost function takes a single array argument.
        call_args = np.array(base_args, dtype=np.double)
        for i, value in enumerate(scan):
            call_args[pos] = value
            fcn_values[i] = self.fcn(call_args)
    else:
        # The cost function takes positional scalar arguments.
        for i, value in enumerate(scan):
            base_args[pos] = value
            fcn_values[i] = self.fcn(*base_args)
    if subtract_min:
        fcn_values -= self.fval
    return scan, fcn_values
def draw_profile(self, vname, x, y, s, band, text):
    """Plot a 1D profile scan of the cost function over parameter *vname*.

    x, y are the scan points and FCN values (e.g. as produced by
    ``profile``); *s* is an optional boolean mask selecting valid points;
    *band* draws a shaded error band; *text* adds a title with the value
    and errors.  Returns the (possibly masked) x, y arrays.
    """
    from matplotlib import pyplot as plt
    if s is not None:
        # Keep only the points flagged as valid.
        s = np.array(s, dtype=bool)
        x = x[s]
        y = y[s]
    plt.plot(x, y)
    plt.grid(True)
    plt.xlabel(vname)
    plt.ylabel("FCN")
    if vname in self.values:
        v = self.values[vname]
    else:
        # NOTE(review): np.argmin returns the *index* of the minimum, not
        # the x-value (x[np.argmin(y)]) -- confirm this fallback is intended.
        v = np.argmin(y)
    vmin = None
    vmax = None
    if (vname, 1) in self.merrors:
        # Asymmetric MINOS errors; the lower error is stored as a negative value.
        vmin = v + self.merrors[(vname, -1)]
        vmax = v + self.merrors[(vname, 1)]
    if vname in self.errors:
        # NOTE(review): symmetric errors overwrite the MINOS band computed
        # just above whenever both are present -- confirm intended precedence.
        vmin = v - self.errors[vname]
        vmax = v + self.errors[vname]
    plt.axvline(v, color="r")
    if vmin is not None and band:
        plt.axvspan(vmin, vmax, facecolor="g", alpha=0.5)
    if text:
        plt.title(
            ("%s = %.3g" % (vname, v))
            if vmin is None
            else ("%s = %.3g - %.3g + %.3g" % (vname, v, v - vmin, vmax - v)),
            fontsize="large",
        )
    return x, y
def draw_contour(self, x, y, bins=20, bound=2, args=None, show_sigma=False):
    """Draw FCN contour lines for parameters *x* and *y*.

    Contour levels are errordef * k**2 for k = 1..bound; with
    show_sigma=True the levels are labeled "k sigma" instead of their
    numeric values.  Returns the (vx, vy, vz) grid used for plotting.
    """
    from matplotlib import pyplot as plt
    vx, vy, vz = self.contour(x, y, bins, bound, args, subtract_min=True)
    v = [self.errordef * ((i + 1) ** 2) for i in range(bound)]
    # NOTE(review): only three colors are provided; bound > 3 levels will
    # cycle colors -- confirm acceptable.
    CS = plt.contour(vx, vy, vz, v, colors=["b", "k", "r"])
    if not show_sigma:
        plt.clabel(CS, v)
    else:
        # Map each level value to its "k sigma" label.
        tmp = dict((vv, r"%i $\sigma$" % (i + 1)) for i, vv in enumerate(v))
        plt.clabel(CS, v, fmt=tmp, fontsize=16)
    plt.xlabel(x)
    plt.ylabel(y)
    # Cross-hairs at the current parameter values.
    plt.axhline(self.values[y], color="k", ls="--")
    plt.axvline(self.values[x], color="k", ls="--")
    plt.grid(True)
    return vx, vy, vz
def draw_mncontour(self, x, y, nsigma=2, numpoints=20):
    """Draw MINOS contours for parameters *x* and *y*.

    One closed contour is drawn per sigma level from 1 to *nsigma*,
    each traced with *numpoints* points.  Returns the matplotlib
    ContourSet so the caller can further style or inspect it.
    """
    from matplotlib import pyplot as plt
    from matplotlib.contour import ContourSet
    c_val = []
    c_pts = []
    for sigma in range(1, nsigma + 1):
        # [2] selects the contour point list; the first two elements are
        # presumably the per-parameter MINOS results -- confirm.
        pts = self.mncontour(x, y, numpoints, sigma)[2]
        # close curve
        pts.append(pts[0])
        c_val.append(sigma)
        c_pts.append([pts])  # level can have more than one contour in mpl
    cs = ContourSet(plt.gca(), c_val, c_pts)
    plt.clabel(cs, inline=1, fontsize=10)
    plt.xlabel(x)
    plt.ylabel(y)
    return cs
| 30.517483 | 89 | 0.566911 |
560ac21d4d4af680d4cef7520defb52bab5f7623 | 259 | py | Python | 1Dec/serializationDemo.py | universekavish/Python-Training | ccd7dfbc8802662de0e0fc20fe99bb3aae4c6e18 | [
"Apache-2.0"
] | null | null | null | 1Dec/serializationDemo.py | universekavish/Python-Training | ccd7dfbc8802662de0e0fc20fe99bb3aae4c6e18 | [
"Apache-2.0"
] | null | null | null | 1Dec/serializationDemo.py | universekavish/Python-Training | ccd7dfbc8802662de0e0fc20fe99bb3aae4c6e18 | [
"Apache-2.0"
] | null | null | null | import pickle
# Serialization: write the object graph to disk with pickle.
# Context managers guarantee the file handles are closed even if
# dump/load raises (the original closed them manually).
L = [10, 20, 'Hello', [30, 40], {1: 2, 3: 4}, 50]
with open('mydata.serialized', 'wb') as fout:
    pickle.dump(L, fout)

# Deserialization: read the same object graph back from disk.
with open('mydata.serialized', 'rb') as fin:
    L1 = pickle.load(fin)
print(L1) | 18.5 | 47 | 0.664093 |
99820f62e3a3d017fa61f7c6a7119d0613ccc035 | 5,946 | py | Python | test/functional/interface_zmq.py | DclrCoin/dclrcoin | 1ca3bd1f787fdead6ae84b7cda2bab6c6cb62b1d | [
"MIT"
] | null | null | null | test/functional/interface_zmq.py | DclrCoin/dclrcoin | 1ca3bd1f787fdead6ae84b7cda2bab6c6cb62b1d | [
"MIT"
] | 2 | 2021-12-18T03:02:54.000Z | 2022-01-17T17:55:36.000Z | test/functional/interface_zmq.py | DclrCoin/dclrcoin | 1ca3bd1f787fdead6ae84b7cda2bab6c6cb62b1d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction, hash256
from test_framework.util import assert_equal, connect_nodes
from io import BytesIO
from time import sleep
def hash256_reversed(byte_str):
    """Double-SHA256 the payload and return the digest in reversed byte order."""
    digest = hash256(byte_str)
    return digest[::-1]
class ZMQSubscriber:
    """Wrap a ZMQ SUB socket subscribed to a single notification topic.

    Tracks the publisher's per-topic sequence number so any dropped or
    reordered notification fails the test immediately.
    """
    def __init__(self, socket, topic):
        self.sequence = 0
        self.socket = socket
        self.topic = topic
        import zmq
        # Restrict this socket to the given topic.
        self.socket.setsockopt(zmq.SUBSCRIBE, self.topic)
    def receive(self):
        """Return the body of the next notification, checking topic and sequence."""
        received_topic, body, seq_bytes = self.socket.recv_multipart()
        # The publisher must only deliver messages for our subscribed topic.
        assert_equal(received_topic, self.topic)
        # The 4-byte little-endian sequence number must increase by one per message.
        (seq,) = struct.unpack('<I', seq_bytes)
        assert_equal(seq, self.sequence)
        self.sequence += 1
        return body
class ZMQTest (BitcoinTestFramework):
    """Functional test for the node's ZMQ notification interface.

    Runs two nodes; node 0 publishes hashblock/hashtx/rawblock/rawtx
    notifications, and the test asserts their content and ordering.
    """
    def set_test_params(self):
        self.num_nodes = 2
    def skip_test_if_missing_module(self):
        self.skip_if_no_py3_zmq()
        self.skip_if_no_dclrcoind_zmq()
    def run_test(self):
        import zmq
        self.ctx = zmq.Context()
        try:
            self.test_basic()
            self.test_reorg()
        finally:
            # Destroy the ZMQ context.
            self.log.debug("Destroying ZMQ context")
            self.ctx.destroy(linger=None)
    def test_basic(self):
        # All messages are received in the same socket which means
        # that this test fails if the publishing order changes.
        # Note that the publishing order is not defined in the documentation and
        # is subject to change.
        import zmq
        # Invalid zmq arguments don't take down the node, see #17185.
        self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"])
        address = 'tcp://127.0.0.1:28332'
        socket = self.ctx.socket(zmq.SUB)
        socket.set(zmq.RCVTIMEO, 60000)
        # Subscribe to all available topics.
        hashblock = ZMQSubscriber(socket, b"hashblock")
        hashtx = ZMQSubscriber(socket, b"hashtx")
        rawblock = ZMQSubscriber(socket, b"rawblock")
        rawtx = ZMQSubscriber(socket, b"rawtx")
        self.restart_node(0, ["-zmqpub%s=%s" % (sub.topic.decode(), address) for sub in [hashblock, hashtx, rawblock, rawtx]])
        connect_nodes(self.nodes[0], 1)
        socket.connect(address)
        # Relax so that the subscriber is ready before publishing zmq messages
        sleep(0.2)
        num_blocks = 5
        self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks})
        genhashes = self.nodes[0].generatetoaddress(num_blocks, ADDRESS_BCRT1_UNSPENDABLE)
        self.sync_all()
        # NOTE(review): `hex` and `hash` below shadow Python builtins.
        for x in range(num_blocks):
            # Should receive the coinbase txid.
            txid = hashtx.receive()
            # Should receive the coinbase raw transaction.
            hex = rawtx.receive()
            tx = CTransaction()
            tx.deserialize(BytesIO(hex))
            tx.calc_sha256()
            assert_equal(tx.hash, txid.hex())
            # Should receive the generated block hash.
            hash = hashblock.receive().hex()
            assert_equal(genhashes[x], hash)
            # The block should only have the coinbase txid.
            assert_equal([txid.hex()], self.nodes[1].getblock(hash)["tx"])
            # Should receive the generated raw block.
            block = rawblock.receive()
            assert_equal(genhashes[x], hash256_reversed(block[:80]).hex())
        if self.is_wallet_compiled():
            self.log.info("Wait for tx from second node")
            payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
            self.sync_all()
            # Should receive the broadcasted txid.
            txid = hashtx.receive()
            assert_equal(payment_txid, txid.hex())
            # Should receive the broadcasted raw transaction.
            hex = rawtx.receive()
            assert_equal(payment_txid, hash256_reversed(hex).hex())
        self.log.info("Test the getzmqnotifications RPC")
        assert_equal(self.nodes[0].getzmqnotifications(), [
            {"type": "pubhashblock", "address": address, "hwm": 1000},
            {"type": "pubhashtx", "address": address, "hwm": 1000},
            {"type": "pubrawblock", "address": address, "hwm": 1000},
            {"type": "pubrawtx", "address": address, "hwm": 1000},
        ])
        assert_equal(self.nodes[1].getzmqnotifications(), [])
    def test_reorg(self):
        import zmq
        address = 'tcp://127.0.0.1:28333'
        socket = self.ctx.socket(zmq.SUB)
        socket.set(zmq.RCVTIMEO, 60000)
        hashblock = ZMQSubscriber(socket, b'hashblock')
        # Should only notify the tip if a reorg occurs
        self.restart_node(0, ['-zmqpub%s=%s' % (hashblock.topic.decode(), address)])
        socket.connect(address)
        # Relax so that the subscriber is ready before publishing zmq messages
        sleep(0.2)
        # Generate 1 block in nodes[0] and receive all notifications
        self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
        assert_equal(self.nodes[0].getbestblockhash(), hashblock.receive().hex())
        # Generate 2 blocks in nodes[1]
        self.nodes[1].generatetoaddress(2, ADDRESS_BCRT1_UNSPENDABLE)
        # nodes[0] will reorg chain after connecting back nodes[1]
        connect_nodes(self.nodes[0], 1)
        # Should receive nodes[1] tip
        assert_equal(self.nodes[1].getbestblockhash(), hashblock.receive().hex())
# Entry point when the functional test is executed directly.
if __name__ == '__main__':
    ZMQTest().main()
| 36.931677 | 126 | 0.634208 |
22aa4fc1db26fb483ad506ce084fd539b23a6e52 | 618 | py | Python | env/lib/python3.8/site-packages/plotly/validators/contour/hoverlabel/font/_family.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/contour/hoverlabel/font/_family.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/contour/hoverlabel/font/_family.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="family", parent_name="contour.hoverlabel.font", **kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
| 34.333333 | 83 | 0.619741 |
2710dad61612063f7cb0a36f5d388646684fb87e | 8,551 | py | Python | tests/integration/edges_settings.py | kytos-ng/pathfinder | 99cb431a76d9e0602f7ffb553cd57a91c11bb8e1 | [
"MIT"
] | null | null | null | tests/integration/edges_settings.py | kytos-ng/pathfinder | 99cb431a76d9e0602f7ffb553cd57a91c11bb8e1 | [
"MIT"
] | 10 | 2021-11-08T12:55:28.000Z | 2022-02-23T18:48:32.000Z | tests/integration/edges_settings.py | kytos-ng/pathfinder | 99cb431a76d9e0602f7ffb553cd57a91c11bb8e1 | [
"MIT"
] | 1 | 2021-11-30T14:53:17.000Z | 2021-11-30T14:53:17.000Z | """Module to overwrite all the needed methods."""
# Core modules to import
from kytos.core.link import Link
# pylint: disable=E0401
from tests.integration.test_paths import TestPaths
class EdgesSettings(TestPaths):
"""Class to setups all the settings related to topology."""
@staticmethod
def generate_topology():
"""Generate a predetermined topology."""
switches, interfaces, links = {}, {}, {}
TestPaths.create_switch("S1", switches)
TestPaths.add_interfaces(2, switches["S1"], interfaces)
TestPaths.create_switch("S2", switches)
TestPaths.add_interfaces(2, switches["S2"], interfaces)
TestPaths.create_switch("S3", switches)
TestPaths.add_interfaces(6, switches["S3"], interfaces)
TestPaths.create_switch("S4", switches)
TestPaths.add_interfaces(2, switches["S4"], interfaces)
TestPaths.create_switch("S5", switches)
TestPaths.add_interfaces(6, switches["S5"], interfaces)
TestPaths.create_switch("S6", switches)
TestPaths.add_interfaces(5, switches["S6"], interfaces)
TestPaths.create_switch("S7", switches)
TestPaths.add_interfaces(2, switches["S7"], interfaces)
TestPaths.create_switch("S8", switches)
TestPaths.add_interfaces(8, switches["S8"], interfaces)
TestPaths.create_switch("S9", switches)
TestPaths.add_interfaces(4, switches["S9"], interfaces)
TestPaths.create_switch("S10", switches)
TestPaths.add_interfaces(3, switches["S10"], interfaces)
TestPaths.create_switch("S11", switches)
TestPaths.add_interfaces(3, switches["S11"], interfaces)
TestPaths.create_switch("User1", switches)
TestPaths.add_interfaces(4, switches["User1"], interfaces)
TestPaths.create_switch("User2", switches)
TestPaths.add_interfaces(2, switches["User2"], interfaces)
TestPaths.create_switch("User3", switches)
TestPaths.add_interfaces(2, switches["User3"], interfaces)
TestPaths.create_switch("User4", switches)
TestPaths.add_interfaces(3, switches["User4"], interfaces)
EdgesSettings._fill_links(links, interfaces)
EdgesSettings._add_metadata_to_links(links)
return switches, links
@staticmethod
def _add_metadata_to_links(links):
links["S1:1<->S2:1"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 105}
)
links["S1:2<->User1:1"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 1}
)
links["S2:2<->User4:1"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 10}
)
links["S3:1<->S5:1"].extend_metadata(
{"reliability": 5, "bandwidth": 10, "delay": 112}
)
links["S3:2<->S7:1"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 1}
)
links["S3:3<->S8:1"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 1}
)
links["S3:4<->S11:1"].extend_metadata(
{"reliability": 3, "bandwidth": 100, "delay": 6}
)
links["S3:5<->User3:1"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 1}
)
links["S3:6<->User4:2"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 10}
)
links["S4:1<->S5:2"].extend_metadata(
{
"reliability": 1,
"bandwidth": 100,
"delay": 30,
"ownership": {"A": {}},
}
)
links["S4:2<->User1:2"].extend_metadata(
{
"reliability": 3,
"bandwidth": 100,
"delay": 110,
"ownership": {"A": {}},
}
)
links["S5:3<->S6:1"].extend_metadata(
{"reliability": 1, "bandwidth": 100, "delay": 40}
)
links["S5:4<->S6:2"].extend_metadata(
{
"reliability": 3,
"bandwidth": 100,
"delay": 40,
"ownership": {"A": {}},
}
)
links["S5:5<->S8:2"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 112}
)
links["S5:6<->User1:3"].extend_metadata(
{"reliability": 3, "bandwidth": 100, "delay": 60}
)
links["S6:3<->S9:1"].extend_metadata(
{"reliability": 3, "bandwidth": 100, "delay": 60}
)
links["S6:4<->S9:2"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 62}
)
links["S6:5<->S10:1"].extend_metadata(
{"bandwidth": 100, "delay": 108, "ownership": {"A": {}}}
)
links["S7:2<->S8:3"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 1}
)
links["S8:4<->S9:3"].extend_metadata(
{"reliability": 3, "bandwidth": 100, "delay": 32}
)
links["S8:5<->S9:4"].extend_metadata(
{"reliability": 3, "bandwidth": 100, "delay": 110}
)
links["S8:6<->S10:2"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "ownership": {"A": {}}}
)
links["S8:7<->S11:2"].extend_metadata(
{"reliability": 3, "bandwidth": 100, "delay": 7}
)
links["S8:8<->User3:2"].extend_metadata(
{"reliability": 5, "bandwidth": 100, "delay": 1}
)
links["S10:3<->User2:1"].extend_metadata(
{
"reliability": 3,
"bandwidth": 100,
"delay": 10,
"ownership": {"A": {}},
}
)
links["S11:3<->User2:2"].extend_metadata(
{"reliability": 3, "bandwidth": 100, "delay": 6}
)
links["User1:4<->User4:3"].extend_metadata(
{"reliability": 5, "bandwidth": 10, "delay": 105}
)
@staticmethod
def _fill_links(links, interfaces):
links["S1:1<->S2:1"] = Link(interfaces["S1:1"], interfaces["S2:1"])
links["S1:2<->User1:1"] = Link(
interfaces["S1:2"], interfaces["User1:1"]
)
links["S2:2<->User4:1"] = Link(
interfaces["S2:2"], interfaces["User4:1"]
)
links["S3:1<->S5:1"] = Link(interfaces["S3:1"], interfaces["S5:1"])
links["S3:2<->S7:1"] = Link(interfaces["S3:2"], interfaces["S7:1"])
links["S3:3<->S8:1"] = Link(interfaces["S3:3"], interfaces["S8:1"])
links["S3:4<->S11:1"] = Link(interfaces["S3:4"], interfaces["S11:1"])
links["S3:5<->User3:1"] = Link(
interfaces["S3:5"], interfaces["User3:1"]
)
links["S3:6<->User4:2"] = Link(
interfaces["S3:6"], interfaces["User4:2"]
)
links["S4:1<->S5:2"] = Link(interfaces["S4:1"], interfaces["S5:2"])
links["S4:2<->User1:2"] = Link(
interfaces["S4:2"], interfaces["User1:2"]
)
links["S5:3<->S6:1"] = Link(interfaces["S5:3"], interfaces["S6:1"])
links["S5:4<->S6:2"] = Link(interfaces["S5:4"], interfaces["S6:2"])
links["S5:5<->S8:2"] = Link(interfaces["S5:5"], interfaces["S8:2"])
links["S5:6<->User1:3"] = Link(
interfaces["S5:6"], interfaces["User1:3"]
)
links["S6:3<->S9:1"] = Link(interfaces["S6:3"], interfaces["S9:1"])
links["S6:4<->S9:2"] = Link(interfaces["S6:4"], interfaces["S9:2"])
links["S6:5<->S10:1"] = Link(interfaces["S6:5"], interfaces["S10:1"])
links["S7:2<->S8:3"] = Link(interfaces["S7:2"], interfaces["S8:3"])
links["S8:4<->S9:3"] = Link(interfaces["S8:4"], interfaces["S9:3"])
links["S8:5<->S9:4"] = Link(interfaces["S8:5"], interfaces["S9:4"])
links["S8:6<->S10:2"] = Link(interfaces["S8:6"], interfaces["S10:2"])
links["S8:7<->S11:2"] = Link(interfaces["S8:7"], interfaces["S11:2"])
links["S8:8<->User3:2"] = Link(
interfaces["S8:8"], interfaces["User3:2"]
)
links["S10:3<->User2:1"] = Link(
interfaces["S10:3"], interfaces["User2:1"]
)
links["S11:3<->User2:2"] = Link(
interfaces["S11:3"], interfaces["User2:2"]
)
links["User1:4<->User4:3"] = Link(
interfaces["User1:4"], interfaces["User4:3"]
)
for link in links.values():
link.enable()
link.activate()
| 30.758993 | 77 | 0.525319 |
bd30487c91c140290e8be25a09d1fef7eb3a234b | 3,700 | py | Python | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_single_alt_loc_files.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | null | null | null | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_single_alt_loc_files.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | null | null | null | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_single_alt_loc_files.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
#
#
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/Utilities24/prepare_single_alt_loc_files.py,v 1.1.2.1 2012/04/13 21:30:24 rhuey Exp $
#
from MolKit import Read
def hasAt(name):
    """Return a true value when the atom name carries an alternate-location
    marker, i.e. contains an '@' character.
    """
    # Membership test replaces the explicit 1/0 branches; bool is an int
    # subclass, so truthiness-based callers behave identically.
    return '@' in name
def getAT_types(m):
    """Return the distinct alternate-location codes (the text after '@')
    found in the atom names of molecule *m*.
    """
    # Select only the atoms whose name carries an '@' alt-loc marker.
    AT_SET = m.allAtoms.get(lambda x: hasAt(x.name))
    # Unique names among those atoms.
    AT_SET_SET = set(AT_SET.name)
    # Dict keys used as a set of alt-loc codes.
    alt_items = {}
    for ent in AT_SET_SET:
        alt_items[ent.split("@")[1]] = 1
    print "@@returning ", alt_items.keys(), " @@"
    return alt_items.keys()
if __name__ == '__main__':
    # Command-line driver: split a PDB file that contains alternate-location
    # (alt-loc) atom records into one PDB file per alternate-location code.
    import sys
    import getopt
    def usage():
        "Print helpful, accurate usage statement to stdout."
        print "Usage: prepare_single_alt_loc_files.py -r filename.pdb"
        print
        print "    Description of command..."
        print "         -r   filename.pdb "
        print "   create separate pdb files for file with alt_loc coords"
        print "    Optional parameters:"
        print "        [-v]  verbose output (default is minimal output)"
        print "        [-o pdb_stem] (default creates 'filename_A.pdb', 'filename_B.pdb' etc)"
    # process command arguments
    try:
        opt_list, args = getopt.getopt(sys.argv[1:], 'r:vo:h')
    except getopt.GetoptError, msg:
        print 'prepare_single_alt_loc_files.py: %s' %msg
        usage()
        sys.exit(2)
    # initialize required parameters
    #-r: filename
    filename = None
    # optional parameters
    #-v verbose
    verbose = None
    #-o pdb_stem
    pdb_stem = None
    #'r:vo:h'
    for o, a in opt_list:
        if o in ('-r', '--r'):
            filename = a
            if verbose: print 'set filename to ', a
        if o in ('-v', '--v'):
            verbose = True
            if verbose: print 'set verbose to ', True
        if o in ('-o', '--o'):
            pdb_stem = a
            if verbose: print 'set pdb_stem to ', a
        if o in ('-h', '--'):
            usage()
            sys.exit()
    if not filename:
        print 'prepare_single_alt_loc_files: filename must be specified.'
        usage()
        sys.exit()
    # Output files are named <file_stem>_<altloc>.pdb; -o overrides the stem.
    file_stem = filename.split('.')[0]
    if pdb_stem is not None:
        file_stem = pdb_stem
    mols = Read(filename)
    if verbose: print 'read ', filename
    mol = mols[0]
    if len(mols)>1:
        if verbose: print "more than one molecule in file using molecule with most atoms"
        #use the molecule with the most atoms
        ctr = 1
        for m in mols[1:]:
            ctr += 1
            if len(m.allAtoms)>len(mol.allAtoms):
                mol = m
                if verbose: print "mol set to ", ctr, "th molecule with", len(mol.allAtoms), "atoms"
    # "*@*" selects atoms whose name carries an '@' alt-loc marker.
    ats = mol.allAtoms.get("*@*")
    if not len(ats):
        print 'Nothing to do:no alt loc atoms found in ', filename
        sys.exit()
    list_to_write = getAT_types(mol)
    ATOMLINES = mol.parser.getAtomsLines(-2,0)
    # Write one PDB per alt-loc code, keeping that code's atoms plus all
    # atoms with a blank alt-loc field, renumbering serials sequentially.
    for altT in list_to_write:
        fn = file_stem + '_' + altT + '.pdb'
        fptr = open(fn, 'w')
        ctr = 1
        for ll in ATOMLINES:
            # ll[16] is the alternate-location indicator column of an
            # ATOM/HETATM record; rebuild the line with a fresh serial
            # number and a blanked alt-loc character.
            if ll[16]==altT: #'B'
                newL = ll[:7] +"%4d" %(ctr) + ll[11:16]+" " + ll[17:]
                #newL = ll[:6] +"%4d" %ctr
                #newL = ll[:16] + " " + ll[17:]
                ctr = ctr + 1
            elif ll[16]==' ':
                #newL = ll
                newL = ll[:7] +"%4d" %(ctr) + ll[11:16]+" " + ll[17:]
                ctr = ctr + 1
            else:
                newL = ""
            if len(newL): fptr.write(newL)
        fptr.close()
# To execute this command type:
# prepare_single_alt_loc_files.py -r pdb_file -o pdb_stem
| 28.461538 | 144 | 0.531081 |
a102baff0eb9ecbe56875c886ef2b311977eefbe | 11,092 | py | Python | ckan/cli/views.py | ziveo/ckan | f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c | [
"Apache-2.0"
] | 1 | 2022-02-14T20:25:34.000Z | 2022-02-14T20:25:34.000Z | ckan/cli/views.py | ziveo/ckan | f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c | [
"Apache-2.0"
] | 4 | 2020-03-24T17:53:23.000Z | 2021-03-31T19:19:03.000Z | ckan/cli/views.py | ziveo/ckan | f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c | [
"Apache-2.0"
] | 3 | 2020-01-02T10:32:37.000Z | 2021-12-22T07:20:21.000Z | # encoding: utf-8
import itertools
import click
import json
import ckan.logic as logic
import ckan.model as model
import ckan.plugins as p
from ckan.cli import error_shout
from ckan.lib.datapreview import (
add_views_to_dataset_resources,
get_view_plugins,
get_default_view_plugins,
)
# Number of datasets fetched per `package_search` page by the `create` command.
_page_size = 100
@click.group()
def views():
    u"""Manage resource views."""
@views.command()
@click.argument(u"types", nargs=-1)
@click.option(u"-d", u"--dataset", multiple=True)
@click.option(u"--no-default-filters", is_flag=True)
@click.option(u"-s", u"--search")
@click.option(u"-y", u"--yes", is_flag=True)
@click.pass_context
def create(ctx, types, dataset, no_default_filters, search, yes):
    """Create views on relevant resources. You can optionally provide
    specific view types (eg `recline_view`, `image_view`). If no types
    are provided, the default ones will be used. These are generally
    the ones defined in the `ckan.views.default_views` config option.
    Note that on either case, plugins must be loaded (ie added to
    `ckan.plugins`), otherwise the command will stop.
    """
    # Datastore-backed default views only make sense when the datastore
    # plugin itself is enabled.
    datastore_enabled = (
        u"datastore" in p.toolkit.config[u"ckan.plugins"].split()
    )
    # Plugin resolution needs a request context on the underlying Flask app.
    flask_app = ctx.obj.app.apps[u"flask_app"]._wsgi_app
    with flask_app.test_request_context():
        loaded_view_plugins = _get_view_plugins(types, datastore_enabled)
    # _get_view_plugins already reported the error; nothing more to do.
    if loaded_view_plugins is None:
        return
    # Act as the site user so view creation is authorized.
    site_user = logic.get_action(u"get_site_user")({u"ignore_auth": True}, {})
    context = {u"user": site_user[u"name"]}
    # Page through matching datasets, _page_size at a time.
    page = 1
    while True:
        query = _search_datasets(
            page, loaded_view_plugins, dataset, search, no_default_filters
        )
        if query is None:
            return
        if page == 1 and query[u"count"] == 0:
            return error_shout(
                u"No datasets to create resource views on, exiting..."
            )
        elif page == 1 and not yes:
            # Interactive confirmation before touching anything (skipped by -y).
            msg = (
                u"\nYou are about to check {0} datasets for the "
                + u"following view plugins: {1}\n"
                + u" Do you want to continue?"
            )
            click.confirm(
                msg.format(query[u"count"], loaded_view_plugins), abort=True
            )
        if query[u"results"]:
            for dataset_dict in query[u"results"]:
                # Datasets without resources cannot get resource views.
                if not dataset_dict.get(u"resources"):
                    continue
                with flask_app.test_request_context():
                    views = add_views_to_dataset_resources(
                        context, dataset_dict, view_types=loaded_view_plugins
                    )
                if views:
                    view_types = list({view[u"view_type"] for view in views})
                    msg = (
                        u"Added {0} view(s) of type(s) {1} to "
                        + u"resources from dataset {2}"
                    )
                    click.secho(
                        msg.format(
                            len(views),
                            u", ".join(view_types),
                            dataset_dict[u"name"],
                        )
                    )
            # A short page means every matching dataset has been consumed.
            if len(query[u"results"]) < _page_size:
                break
            page += 1
        else:
            break
    click.secho(u"Done", fg=u"green")
@views.command()
@click.argument(u"types", nargs=-1)
@click.option(u"-y", u"--yes", is_flag=True)
def clear(types, yes):
    u"""Permanently delete all views or the ones with the provided types."""
    # Ask for confirmation unless -y was given.
    if not yes:
        if types:
            prompt = (
                u"Are you sure you want to delete all resource views "
                u"of type {0}?".format(u", ".join(types))
            )
        else:
            prompt = u"Are you sure you want to delete all resource views?"
        click.confirm(prompt, abort=True)
    # Run the clear action as the site user.
    site_user = logic.get_action(u"get_site_user")({u"ignore_auth": True}, {})
    action_context = {u"user": site_user[u"name"]}
    logic.get_action(u"resource_view_clear")(
        action_context, {u"view_types": types}
    )
    click.secho(u"Done", fg=u"green")
@views.command()
@click.option(u"-y", u"--yes", is_flag=True)
@click.pass_context
def clean(ctx, yes):
    u"""Permanently delete views for all types no longer present in the
    `ckan.plugins` configuration option.
    """
    flask_app = ctx.obj.app.apps[u"flask_app"]._wsgi_app
    # Collect the view-type names of every currently loaded IResourceView
    # plugin; anything else in the database is stale.
    with flask_app.test_request_context():
        names = [
            str(plugin.info()[u"name"])
            for plugin in p.PluginImplementations(p.IResourceView)
        ]
    results = model.ResourceView.get_count_not_in_view_types(names)
    if not results:
        return click.secho(u"No resource views to delete", fg=u"red")
    click.secho(u"This command will delete.\n")
    for row in results:
        click.secho(u"%s of type %s" % (row[1], row[0]))
    if not yes:
        click.confirm(
            u"Do you want to delete these resource views?", abort=True
        )
    model.ResourceView.delete_not_in_view_types(names)
    model.Session.commit()
    click.secho(u"Deleted resource views.", fg=u"green")
def _get_view_plugins(view_plugin_types, get_datastore_views=False):
    u"""Return the names of the view plugins that were successfully loaded.

    When ``view_plugin_types`` is empty, the default view plugins (the ones
    from the ``ckan.views.default_views`` setting) are used; only in that
    case does ``get_datastore_views`` additionally include plugins that
    require the data to be in the DataStore.

    Returns ``None`` (after shouting an error) when any requested plugin is
    not loaded, i.e. missing from the `ckan.plugins` configuration option.
    """
    if view_plugin_types:
        view_plugins = get_view_plugins(view_plugin_types)
    else:
        click.secho(u"No view types provided, using default types")
        view_plugins = get_default_view_plugins()
        if get_datastore_views:
            view_plugins.extend(
                get_default_view_plugins(get_datastore_views=True)
            )
    loaded_view_plugins = [
        plugin.info()[u"name"] for plugin in view_plugins
    ]
    plugins_not_found = list(
        set(view_plugin_types) - set(loaded_view_plugins)
    )
    if not plugins_not_found:
        return loaded_view_plugins
    error_shout(
        u"View plugin(s) not found : {0}. ".format(plugins_not_found)
        + u"Have they been added to the `ckan.plugins` configuration"
        + u" option?"
    )
    return None
def _search_datasets(
    page=1, view_types=(), dataset=(), search=u"", no_default_filters=False
):
    u"""Perform a query with `package_search` and return the result.

    Results can be paginated using the `page` parameter (``_page_size`` rows
    per page).

    :param view_types: view plugin names used to derive default format
        filters (see ``_add_default_filters``)
    :param dataset: specific dataset ids/names to restrict the query to
    :param search: JSON string with user-provided search params
    :param no_default_filters: when True, skip the default format filters

    Returns ``None`` when the user-provided search params cannot be parsed.
    """
    # NOTE: defaults are immutable tuples (not the mutable-default-argument
    # anti-pattern); both values are only iterated / truth-tested below.
    n = _page_size
    search_data_dict = {
        u"q": u"",
        u"fq": u"",
        u"fq_list": [],
        u"include_private": True,
        u"rows": n,
        u"start": n * (page - 1),
    }
    if dataset:
        # Restrict the query to the given datasets, matched by id or name.
        search_data_dict[u"q"] = u" OR ".join(
            [
                u'id:{0} OR name:"{0}"'.format(dataset_id)
                for dataset_id in dataset
            ]
        )
    elif search:
        search_data_dict = _update_search_params(search_data_dict, search)
        if search_data_dict is None:
            return None
    elif not no_default_filters:
        _add_default_filters(search_data_dict, view_types)
    # Fall back to a match-all query when nothing set `q`.
    if not search_data_dict.get(u"q"):
        search_data_dict[u"q"] = u"*:*"
    query = p.toolkit.get_action(u"package_search")({}, search_data_dict)
    return query
def _add_default_filters(search_data_dict, view_types):
    """
    Adds extra filters to the `package_search` dict for common view types
    It basically adds `fq` parameters that filter relevant resource formats
    for the view types provided. For instance, if one of the view types is
    `pdf_view` the following will be added to the final query:
        fq=res_format:"pdf" OR res_format:"PDF"
    This obviously should only be used if all view types are known and can
    be filtered, otherwise we want all datasets to be returned. If a
    non-filterable view type is provided, the search params are not
    modified.
    Returns the provided data_dict for `package_search`, optionally
    modified with extra filters.
    """
    # Imported lazily: these ckanext plugins may not be importable at module
    # import time.
    from ckanext.imageview.plugin import DEFAULT_IMAGE_FORMATS
    from ckanext.textview.plugin import get_formats as get_text_formats
    from ckanext.datapusher.plugin import DEFAULT_FORMATS as datapusher_formats
    # Collect every format (lower and upper case) relevant to the requested
    # view types.
    filter_formats = []
    for view_type in view_types:
        if view_type == u"image_view":
            for _format in DEFAULT_IMAGE_FORMATS:
                filter_formats.extend([_format, _format.upper()])
        elif view_type == u"text_view":
            formats = get_text_formats(p.toolkit.config)
            for _format in itertools.chain.from_iterable(formats.values()):
                filter_formats.extend([_format, _format.upper()])
        elif view_type == u"pdf_view":
            filter_formats.extend([u"pdf", u"PDF"])
        elif view_type in [
            u"recline_view",
            u"recline_grid_view",
            u"recline_graph_view",
            u"recline_map_view",
        ]:
            # The datapusher formats are shared by all recline views; add
            # them only once.
            if datapusher_formats[0] in filter_formats:
                continue
            for _format in datapusher_formats:
                # Skip mimetype-style entries (containing '/').
                if u"/" not in _format:
                    filter_formats.extend([_format, _format.upper()])
        else:
            # There is another view type provided so we can't add any
            # filter
            return search_data_dict
    # One fq_list entry ORing all collected formats.
    filter_formats_query = [
        u'+res_format:"{0}"'.format(_format) for _format in filter_formats
    ]
    search_data_dict[u"fq_list"].append(u" OR ".join(filter_formats_query))
    return search_data_dict
def _update_search_params(search_data_dict, search):
"""
Update the `package_search` data dict with the user provided parameters
Supported fields are `q`, `fq` and `fq_list`.
If the provided JSON object can not be parsed the process stops with
an error.
Returns the updated data dict
"""
if not search:
return search_data_dict
try:
user_search_params = json.loads(search)
except ValueError as e:
error_shout(u"Unable to parse JSON search parameters: {0}".format(e))
return None
if user_search_params.get(u"q"):
search_data_dict[u"q"] = user_search_params[u"q"]
if user_search_params.get(u"fq"):
if search_data_dict[u"fq"]:
search_data_dict[u"fq"] += u" " + user_search_params[u"fq"]
else:
search_data_dict[u"fq"] = user_search_params[u"fq"]
if user_search_params.get(u"fq_list") and isinstance(
user_search_params[u"fq_list"], list
):
search_data_dict[u"fq_list"].extend(user_search_params[u"fq_list"])
return search_data_dict
| 30.306011 | 79 | 0.618644 |
373732d2cedf7272dd600479238c6d6b4806d9be | 102 | py | Python | apps/inventory/apps.py | mikespux/prov-jewellery-cloud | 4bb16b74d4f32eec938e64325c39bb5770ad2848 | [
"MIT"
] | 1 | 2020-05-17T22:27:02.000Z | 2020-05-17T22:27:02.000Z | apps/inventory/apps.py | mikespux/prov-jewellery-cloud | 4bb16b74d4f32eec938e64325c39bb5770ad2848 | [
"MIT"
] | 7 | 2020-06-05T19:36:40.000Z | 2022-03-11T23:37:38.000Z | apps/inventory/apps.py | mikespux/prov-jewellery-cloud | 4bb16b74d4f32eec938e64325c39bb5770ad2848 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class InventoryItemConfig(AppConfig):
    """Django application configuration for the inventory app."""

    # NOTE(review): `name` is 'inventory_item' while the package directory is
    # apps/inventory — confirm this matches the dotted path in INSTALLED_APPS.
    name = 'inventory_item'
| 17 | 37 | 0.784314 |
af1a6e9728363ac90be9a87ee8c90a2012aa73a0 | 3,115 | py | Python | battleship/ai/learner.py | agonopol/battleship-ai | eef51e972b26e5454dfc0d3685d417cc429969b1 | [
"MIT"
] | null | null | null | battleship/ai/learner.py | agonopol/battleship-ai | eef51e972b26e5454dfc0d3685d417cc429969b1 | [
"MIT"
] | null | null | null | battleship/ai/learner.py | agonopol/battleship-ai | eef51e972b26e5454dfc0d3685d417cc429969b1 | [
"MIT"
] | null | null | null | from battleship.ship import Ship
from battleship.grid import Grid
from battleship.ai.player import Player, Outcome
import numpy as np
import os
from tensorforce.agents import PPOAgent
def to_model(size, path):
    """Return the checkpoint file path for a size x size board under *path*."""
    board_dir = "%dx%d" % (size, size)
    return os.path.join(path, board_dir, "model")
def agent(size, path):
    """Build a PPO agent for a size x size board, restoring previously saved
    weights when a checkpoint directory already exists under *path*.

    The observation is a (size, size) float matrix and each action picks an
    x and a y coordinate in [0, size).
    """
    model_dir = os.path.dirname(to_model(size, path))
    # Lets try out simple gradient descent agent for now, no reason to be fancy
    agent = PPOAgent(states=dict(type='float', shape=(size, size)),
                     actions=dict(
                         x=dict(type='int', num_actions=size),
                         y=dict(type='int', num_actions=size),
                     ),
                     # Simple 2 layer network, first flatten matrix then train
                     # on 2 layers, need time to experiment with more complex
                     network=[
                         dict(type='flatten'),
                         dict(type='dense', size=64),
                         dict(type='dense', size=64),
                     ],
                     batching_capacity=100,
                     discount=.75
                     )
    # Restore saved weights if present; the duplicated `return agent` in the
    # original branches collapsed into a single return.
    if os.path.exists(model_dir):
        print("Loading previous model ... ")
        agent.restore_model(model_dir)
    return agent
class Learner(Player):
    """Battleship player that learns where to shoot using a PPO agent.

    Keeps its own Grid of ships, a (size, size) ``mask`` matrix used as the
    agent's observation (1 = hit, -1 = miss, 0 = untried), and a count of
    remaining own ship cells.
    """
    def __init__(self, size, path):
        super(Learner, self).__init__()
        self.size = size
        self.grid = Grid(size)
        # Agent observation: 1 = hit, -1 = miss, 0 = untried.
        self.mask = np.zeros((self.size, self.size))
        self.ships = 0
        self.path = path
        self.agent = agent(size, path)
    def save(self):
        """Persist the agent's model under the per-size model directory."""
        path = to_model(self.size, self.path)
        os.makedirs(path, exist_ok=True)
        self.agent.save_model(path, append_timestep=False)
    def setup(self, ships=(5, 4, 3, 3, 2)):
        """Reset grid and mask, then randomly place ships of the given lengths."""
        self.grid = Grid(self.size)
        self.mask = np.zeros((self.size, self.size))
        # `ships` counts remaining ship cells (sum of all lengths).
        self.ships = sum(ships)
        for n in ships:
            ship = Ship.random(self.size, n)
            # Retry random placements until one fits.
            while not self.grid.place(ship):
                ship = Ship.random(self.size, n)
    def target(self) -> (int, int):
        """Ask the agent for the next (x, y) cell to shoot at."""
        action = self.agent.act(self.mask)
        return action['x'], action['y']
    def report(self, x: int, y: int) -> Outcome:
        """Resolve an opponent's shot at (x, y) against this player's grid."""
        result = self.grid.hit(x, y)
        if result == Outcome.HIT:
            self.ships -= 1
        return result
    def display(self, hidden=True):
        """Render the grid; `hidden` is forwarded to Grid.display."""
        self.grid.display(hidden=hidden)
    def mark(self, x: int, y: int, result: Outcome):
        """Record the outcome of this player's own shot at (x, y) in the mask
        and feed the corresponding reward to the agent.
        """
        if result == Outcome.WIN:
            self.mask[x, y] = 1
            self.agent.observe(reward=10.0, terminal=True)
        elif result == Outcome.HIT:
            self.mask[x, y] = 1
            self.agent.observe(reward=3.0, terminal=False)
        elif result == Outcome.MISS:
            self.mask[x, y] = -1
            self.agent.observe(reward=.5, terminal=False)
        elif result == Outcome.INVALID:
            # Invalid shots earn nothing but do not end the episode.
            self.agent.observe(reward=0, terminal=False)
        else:
            # Any other outcome ends the episode with a penalty.
            self.agent.observe(reward=-10, terminal=True)
        return
    def remaining(self) -> int:
        """Number of this player's own ship cells still afloat."""
        return self.ships
| 33.494624 | 132 | 0.552167 |
e26d2562e1acfef9b62c74f1e96a5e5df25acc03 | 3,198 | py | Python | dev_scripts/parallel_script_skeleton.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 5 | 2021-08-10T23:16:44.000Z | 2022-03-17T17:27:00.000Z | dev_scripts/parallel_script_skeleton.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 330 | 2021-06-10T17:28:22.000Z | 2022-03-31T00:55:48.000Z | dev_scripts/parallel_script_skeleton.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 6 | 2021-06-10T17:20:32.000Z | 2022-03-28T08:08:03.000Z | #!/usr/bin/env python
"""
This is an example of script using the `joblib_helpers` API to run jobs in
parallel.
# Run with:
> clear; parallel_script_skeleton.py --workload success --num_threads serial
> clear; parallel_script_skeleton.py --workload success --num_threads 2
> clear; parallel_script_skeleton.py --workload failure --num_threads serial
> clear; parallel_script_skeleton.py --workload failure --num_threads 3
Add a description of what the script does and examples of command lines.
Check dev_scripts/linter.py to see an example of a script using this
template.
Import as:
import dev_scripts.parallel_script_skeleton as dspascsk
"""
# TODO(gp): We should test this, although the library is already tested.
import argparse
import logging
import os
import helpers.hdbg as hdbg
import helpers.hjoblib as hjoblib
import helpers.hparser as hparser
# This module contains example workloads.
import helpers.test.test_joblib_helpers
# import helpers.hsystem as hsysinte
# Module-level logger for this script.
_LOG = logging.getLogger(__name__)
# #############################################################################
def _parse() -> argparse.ArgumentParser:
    """Build the command-line parser for this script.

    Adds the script-specific options (`--workload`, `--randomize`, `--seed`)
    plus the standard parallel-processing and verbosity options from
    `hparser`.
    """
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--workload",
        action="store",
        type=str,
        choices=["success", "failure"],
        # Typo fixed: "Worklod" -> "Workload".
        help="Workload to execute",
    )
    parser.add_argument(
        "--randomize",
        action="store_true",
    )
    parser.add_argument(
        "--seed",
        action="store",
        default=1,
        type=int,
    )
    # parser = hparser.add_dst_dir_arg(parser, dst_dir_required=True)
    parser = hparser.add_parallel_processing_arg(parser)
    parser = hparser.add_verbosity_arg(parser)
    return parser  # type: ignore
def _main(parser: argparse.ArgumentParser) -> None:
    """Parse the CLI args, build the selected example workload, and run it
    through `hjoblib.parallel_execute`, printing the results.
    """
    args = parser.parse_args()
    hdbg.init_logger(verbosity=args.log_level, use_exec_path=True)
    # Prepare the workload.
    randomize = args.randomize
    # randomize = False
    seed = args.seed
    if args.workload == "success":
        workload = helpers.test.test_joblib_helpers.get_workload1(
            randomize, seed=seed
        )
    elif args.workload == "failure":
        workload = helpers.test.test_joblib_helpers.get_workload2()
    else:
        # NOTE(review): assumes hdbg.dfatal aborts execution, otherwise
        # `workload` would be unbound below — confirm.
        hdbg.dfatal("Invalid workload='%s'" % args.workload)
    # Handle the dst dir.
    # dst_dir, clean_dst_dir = hparser.parse_dst_dir_arg(args)
    # _ = clean_dst_dir
    # Parse command-line options.
    dry_run = args.dry_run
    num_threads = args.num_threads
    incremental = not args.no_incremental
    abort_on_error = not args.skip_on_error
    num_attempts = args.num_attempts
    dst_dir = "."
    log_file = os.path.join(dst_dir, "parallel_execute.log")
    # Execute.
    res = hjoblib.parallel_execute(
        workload,
        #
        dry_run,
        num_threads,
        incremental,
        abort_on_error,
        num_attempts,
        log_file,
    )
    # Print either the bare result or one line per task result.
    if res is None:
        print("res=%s" % res)
    else:
        print("res=\n%s" % "\n".join(map(str, res)))
# Script entry point: parse the command line and run the selected workload.
if __name__ == "__main__":
    _main(_parse())
| 28.052632 | 81 | 0.666354 |
575574c4d030e09fcc6818ca2f344e27eb16b24e | 3,327 | py | Python | create_urls.py | vincentsarago/TileSiege | 51a7966ecfab225ae76bc6cadd4dcfbb5c920d39 | [
"BSD-2-Clause"
] | 19 | 2021-08-29T06:43:34.000Z | 2022-02-24T06:31:44.000Z | create_urls.py | vincentsarago/TileSiege | 51a7966ecfab225ae76bc6cadd4dcfbb5c920d39 | [
"BSD-2-Clause"
] | 3 | 2021-08-29T23:51:52.000Z | 2022-02-24T06:05:58.000Z | create_urls.py | vincentsarago/TileSiege | 51a7966ecfab225ae76bc6cadd4dcfbb5c920d39 | [
"BSD-2-Clause"
] | 2 | 2021-12-12T00:17:20.000Z | 2022-02-23T19:45:56.000Z | import bisect
import random
import csv
import os
import sys
import urllib.request
import lzma
import math
import argparse
# CLI options: --maxzoom caps the deepest sampled zoom level; --bbox limits
# sampling to a lon/lat rectangle.
parser = argparse.ArgumentParser(description='Create a urls.txt for siege.')
parser.add_argument('--maxzoom', type=int, default=19, help='Maximum zoom level, inclusive.')
parser.add_argument('--bbox', type=str,help='Bounding box: min_lon,min_lat,max_lon,max_lat')
args = parser.parse_args()
def _xy(lon,lat):
x = lon/360.0 + 0.5
sinlat = math.sin(math.radians(lat))
y = 0.5 - 0.25 * math.log((1.0 + sinlat) / (1.0 - sinlat)) / math.pi
return x,y
def percentage_split(size, percentages):
    """Yield (index, start, end) ranges partitioning *size* items in
    proportion to *percentages* (which should sum to 1).
    """
    start = 0
    cumulative = 0
    for index, fraction in enumerate(percentages):
        cumulative += fraction
        # Cumulative rounding keeps the total exactly equal to `size`.
        end = int(cumulative * size)
        yield index, start, end
        start = end
# Optional bounding box restricting which tiles are sampled, stored in
# normalized Web-Mercator coordinates as [min_x, min_y, max_x, max_y].
bounds = None
if args.bbox:
    min_lon, min_lat, max_lon, max_lat = args.bbox.split(',')
    min_x, min_y = _xy(float(min_lon),float(min_lat))
    max_x, max_y = _xy(float(max_lon),float(max_lat))
    bounds = [min_x,max_y,max_x,min_y] # invert Y
# one week of anonymized tile edge request logs from openstreetmap.org
FILENAME = 'tiles-2021-08-08.txt.xz'
OUTPUT_ROWS = 10000
if not os.path.isfile(FILENAME):
    print("Downloading " + FILENAME)
    urllib.request.urlretrieve(f'https://planet.openstreetmap.org/tile_logs/{FILENAME}', FILENAME)
# output should be pseudorandom + deterministic.
random.seed(3857)
maxzoom = args.maxzoom
distribution = [2,2,6,12,16,27,38,41,49,56,72,71,99,135,135,136,102,66,37,6] # the total distribution...
# Per-zoom accumulators: totals = request counts, ranges = cumulative count
# offsets (for weighted sampling via bisect), tiles = the tile paths.
total_weight = 0
totals = {}
ranges = {}
tiles = {}
for zoom in range(maxzoom+1):
    total_weight = total_weight + distribution[zoom]
    totals[zoom] = 0
    ranges[zoom] = []
    tiles[zoom] = []
# First pass: index the log so each tile can later be drawn with probability
# proportional to its request count.
with lzma.open(FILENAME,'rt') as f:
    reader = csv.reader(f,delimiter=' ')
    for row in reader:
        split = row[0].split('/')
        z = int(split[0])
        x = int(split[1])
        y = int(split[2])
        count = int(row[1])
        if z > maxzoom:
            continue
        if bounds:
            # NOTE(review): `f` here shadows the open file handle above; the
            # loop keeps working because `reader` holds its own reference.
            f = 1 << z
            if (x >= math.floor(bounds[0] * f) and
                x <= math.floor(bounds[2] * f) and
                y >= math.floor(bounds[1] * f) and
                y <= math.floor(bounds[3] * f)):
                pass
            else:
                continue
        ranges[z].append(totals[z])
        tiles[z].append(row[0])
        totals[z] = totals[z] + count
# Second pass: emit siege-style URL templates, splitting OUTPUT_ROWS across
# zoom levels per `distribution` and sampling tiles weighted by request count.
with open('urls.txt','w') as f:
    f.write("PROT=http\n")
    f.write("HOST=localhost\n")
    f.write("PORT=8080\n")
    f.write("PATH=\n")
    f.write("EXT=pbf\n")
    rows = 0
    for zoom, start, end in percentage_split(
        OUTPUT_ROWS, [distribution[zoom] / total_weight for zoom in range(maxzoom + 1)]
    ):
        rows_for_zoom = end - start
        rows += rows_for_zoom
        for sample in range(rows_for_zoom):
            # Weighted draw: pick a random offset and locate its tile.
            rand = random.randrange(totals[zoom])
            i = bisect.bisect(ranges[zoom],rand)-1
            f.write(f"$(PROT)://$(HOST):$(PORT)/$(PATH){tiles[zoom][i]}.$(EXT)\n")
        # Console histogram of how many requests each zoom level received.
        p1 = ' ' if zoom < 10 else ''
        p2 = ' ' * (len(str(10000)) - len(str(rows_for_zoom)))
        bar = '█' * math.ceil(rows_for_zoom / OUTPUT_ROWS * 60)
        print(f"{p1}{zoom} | {p2}{rows_for_zoom} {bar}")
print(f"wrote urls.txt with {rows} requests.")
| 29.972973 | 104 | 0.60024 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.