hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e4a1c6daccaa109c5ee1053d092659e787cf2f7f | 379 | py | Python | src/pytorch_yard/__init__.py | karolpiczak/pytorch-yard | 1bf2515ffdf63365af87dffecc0e393b4a24ec0f | [
"MIT"
] | null | null | null | src/pytorch_yard/__init__.py | karolpiczak/pytorch-yard | 1bf2515ffdf63365af87dffecc0e393b4a24ec0f | [
"MIT"
] | null | null | null | src/pytorch_yard/__init__.py | karolpiczak/pytorch-yard | 1bf2515ffdf63365af87dffecc0e393b4a24ec0f | [
"MIT"
] | null | null | null | __version__ = "2021.12.31.1"
from . import experiments as experiments
from .configs import RootConfig as RootConfig
from .configs import Settings as Settings
from .utils.logging import debug as debug
from .utils.logging import error as error
from .utils.logging import info as info
from .utils.logging import info_bold as info_bold
from .utils.logging import warning as warning
| 34.454545 | 49 | 0.812665 |
ce1b5fd5ab5b89272605d5862d8b5aac747fdd7e | 2,878 | py | Python | docker/test/integration/environment.py | nandorsoma/nifi-minifi-cpp | 182e5b98a390d6073c43ffc88ecf7511ad0d3ff5 | [
"Apache-2.0"
] | null | null | null | docker/test/integration/environment.py | nandorsoma/nifi-minifi-cpp | 182e5b98a390d6073c43ffc88ecf7511ad0d3ff5 | [
"Apache-2.0"
] | null | null | null | docker/test/integration/environment.py | nandorsoma/nifi-minifi-cpp | 182e5b98a390d6073c43ffc88ecf7511ad0d3ff5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import datetime
import sys
import uuid
import os
sys.path.append('../minifi')
from MiNiFi_integration_test_driver import MiNiFi_integration_test # noqa: E402
from minifi import * # noqa
from minifi.core.ImageStore import ImageStore # noqa
from minifi.core.DockerTestDirectoryBindings import DockerTestDirectoryBindings # noqa
from minifi.core.KubernetesProxy import KubernetesProxy # noqa
def before_scenario(context, scenario):
if "skip" in scenario.effective_tags:
scenario.skip("Marked with @skip")
return
logging.info("Integration test setup at {time:%H:%M:%S.%f}".format(time=datetime.datetime.now()))
context.test = MiNiFi_integration_test(context)
def after_scenario(context, scenario):
if "skip" in scenario.effective_tags:
logging.info("Scenario was skipped, no need for clean up.")
return
logging.info("Integration test teardown at {time:%H:%M:%S.%f}".format(time=datetime.datetime.now()))
context.test.cleanup()
context.directory_bindings.cleanup_io()
if context.kubernetes_proxy:
context.kubernetes_proxy.delete_pods()
def before_all(context):
context.test_id = str(uuid.uuid4())
context.config.setup_logging()
context.image_store = ImageStore()
context.directory_bindings = DockerTestDirectoryBindings(context.test_id)
context.directory_bindings.create_new_data_directories()
context.kubernetes_proxy = None
def before_tag(context, tag):
if tag == "requires.kubernetes.cluster":
context.kubernetes_proxy = KubernetesProxy(context.directory_bindings.get_data_directories(context.test_id)["kubernetes_temp_dir"], os.path.join(os.environ['TEST_DIRECTORY'], 'resources', 'kubernetes', 'pods-etc'))
context.kubernetes_proxy.create_config(context.directory_bindings.get_directory_bindings(context.test_id))
context.kubernetes_proxy.start_cluster()
def after_tag(context, tag):
if tag == "requires.kubernetes.cluster" and context.kubernetes_proxy:
context.kubernetes_proxy.cleanup()
context.kubernetes_proxy = None
| 39.972222 | 222 | 0.76164 |
c0e343aeb6e9084062807bf77d2d28be96cc3a9b | 569 | py | Python | setup.py | tomekmalek/LiveObjects_SDK_for_Python | 1c95ffe2d1296aeef90815755632cf4abfa02cba | [
"MIT"
] | null | null | null | setup.py | tomekmalek/LiveObjects_SDK_for_Python | 1c95ffe2d1296aeef90815755632cf4abfa02cba | [
"MIT"
] | null | null | null | setup.py | tomekmalek/LiveObjects_SDK_for_Python | 1c95ffe2d1296aeef90815755632cf4abfa02cba | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
name='LiveObjects',
url='https://github.com/DatavenueLiveObjects/LiveObjects_SDK_for_Python',
author='Kacper Sawicki, Krzysztof Krześlak, Tomasz Malek',
#author_email='',
packages=['LiveObjects'],
include_package_data=True,
install_requires=['paho-mqtt'],
version='2.0.2',
license='MIT',
description='This module allows to easy send data to Orange LiveObjects using Python3 and uPython',
)
| 27.095238 | 103 | 0.718805 |
d00ecce61d0f389190fc561f5a347e4876ba0c50 | 1,405 | py | Python | cross-guard/ec2-ssh-full-open/__main__.py | moreyhat/pulumi-demo | 5eca5dc9c970299ad7bbcef39f1274345ac6bd18 | [
"MIT"
] | null | null | null | cross-guard/ec2-ssh-full-open/__main__.py | moreyhat/pulumi-demo | 5eca5dc9c970299ad7bbcef39f1274345ac6bd18 | [
"MIT"
] | null | null | null | cross-guard/ec2-ssh-full-open/__main__.py | moreyhat/pulumi-demo | 5eca5dc9c970299ad7bbcef39f1274345ac6bd18 | [
"MIT"
] | null | null | null | import pulumi_aws as aws
# Create a VPC
vpc = aws.ec2.Vpc("vpc", cidr_block="10.0.0.0/16")
# Create a Subnet
subnet = aws.ec2.Subnet(
"subnet",
vpc_id=vpc.id,
cidr_block="10.0.0.0/24",
tags={
"Name": "ssh-full-open",
})
# Create a Security Group
sg = aws.ec2.SecurityGroup(
"security-group",
description="Allow SSH inbound traffic from any place",
vpc_id=vpc.id,
ingress=[aws.ec2.SecurityGroupIngressArgs(
description="SSH",
from_port=22,
to_port=22,
protocol="tcp",
cidr_blocks=['0.0.0.0/0'],
ipv6_cidr_blocks=['::/0'],
)],
egress=[aws.ec2.SecurityGroupEgressArgs(
from_port=0,
to_port=0,
protocol="-1",
cidr_blocks=["0.0.0.0/0"],
ipv6_cidr_blocks=["::/0"],
)],
tags={
"Name": "allow_ssh"
}
)
# Get an AMI
ami = aws.ec2.get_ami(most_recent=True,
filters=[
aws.ec2.GetAmiFilterArgs(
name="name",
values=["amzn2-ami-kernel-5.10-hvm-*"],
),
aws.ec2.GetAmiFilterArgs(
name="virtualization-type",
values=["hvm"],
),
],
owners=["amazon"])
# Create an Instance
ec2 = aws.ec2.Instance(
"ec2",
ami=ami.id,
instance_type="t3.nano",
tags={
"Name": "ssh-full-open",
},
subnet_id=subnet.id,
vpc_security_group_ids=[sg.id],
) | 21.953125 | 59 | 0.552313 |
efeb7750ccf1ad14a143c4e73cde843c5a2e0afe | 10,577 | py | Python | DataScience/python/td_query/test/test_data_manipulate_apac.py | Ernestyj/PyStudy | ee2e314eb808b0b7c4574b3061814abb81bbb7ab | [
"Apache-2.0"
] | 1 | 2016-11-28T03:26:05.000Z | 2016-11-28T03:26:05.000Z | DataScience/python/td_query/test/test_data_manipulate_apac.py | Ernestyj/PyStudy | ee2e314eb808b0b7c4574b3061814abb81bbb7ab | [
"Apache-2.0"
] | null | null | null | DataScience/python/td_query/test/test_data_manipulate_apac.py | Ernestyj/PyStudy | ee2e314eb808b0b7c4574b3061814abb81bbb7ab | [
"Apache-2.0"
] | 2 | 2017-02-02T15:13:01.000Z | 2019-05-30T01:59:17.000Z | # -*- coding: utf-8 -*-
import unittest
import os
import pickle
import pandas as pd
import numpy as np
from td_query import ROOT_PATH
from td_query.data_manipulate import data_manipulate_instance as instance
from teradata import UdaExec
class TestDataManipulateAPAC(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("**************************************** setUpClass ****************************************")
instance.init()
print(instance.teradata)
@classmethod
def tearDownClass(cls):
print("************************************** tearDownClass ***************************************")
def setUp(self):
print("****** setUp *******")
def tearDown(self):
print("***** tearDown *****")
def _example(self):
df = instance.query_sample()
# with open(ROOT_PATH + '/external/df_dispatch_bna.pickle', 'wb') as f: # save
# pickle.dump(df, f)
print(df)
def _calculate(self):
def percent(x, y):
return round(x/y*100, 2)
total = 115554
print(
percent(2877, total),
percent(3909, total),
percent(23030, total),
percent(18840, total),
percent(66898, total),
)
def _query(self):
query = '''select top 10 * from pp_scratch_risk.ms_auto_trend_us_bad;'''
df = instance.query(query)
print(df)
def _query_table_schema(self):
dest_db = "pp_scratch_risk"
dest_table = "ms_auto_trend_us2_1_3_100_100_1_1_1"
result_cursor = instance.teradata.execute("show select * from {}.{};".format(dest_db, dest_table))
last_row = result_cursor.fetchall()
print(last_row)
def _query_table_top_rows(self):
table = "pp_scratch_risk.ms_auto_trend_us_bad"
df = instance.query_table_top_rows(table)
print(df)
def _transalte_1_1_1_1_1(self):
rules = [
"(SELLER_CONSUMER_SEG != 'Y') & (RCVR_CNTRY_CODE == 'C2 ') & (FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5)",
"(SELLER_CONSUMER_SEG != 'Y') & (RCVR_CNTRY_CODE == 'C2 ') & (FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F < 0.5) & (SNDR_CNTRY_CODE != 'FR ') & (SUB_FLOW != 'MS Subscription') & (SNDR_CNTRY_CODE != 'ES ') & (SNDR_CNTRY_CODE == 'US ') & (SUB_FLOW != 'MS PayPal Cart') & (SUB_FLOW != 'MS Mobile Shopping Cart Upload') & (dof_bin == 'e->1y') & (amt2 != 'a-1k') & (SUB_FLOW == 'MS Single Line Payment') & (SELLER_CONSUMER_SEG == 'C')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_100_63_22_14_1(self):
rules = [
"(SUB_FLOW != 'MS MassPay') & (IS_ULP_TRANS_T_F >= 0.5) & (SNDR_CNTRY_CODE != 'AU ') & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (RCVR_CNTRY_CODE != 'NZ ') & (RCVR_CNTRY_CODE != 'JP ') & (SNDR_CNTRY_CODE != 'C2 ') & (RCVR_CNTRY_CODE != 'SG ') & (amt2 != 'e-<50') & (SNDR_CNTRY_CODE != 'HK ') & (dof_bin != 'b-30') & (SUB_FLOW != 'MS Shopping Cart Upload') & (SNDR_CNTRY_CODE != 'ES ') & (SUB_FLOW != 'MS Mobile Shopping Cart Upload')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_637_64_22_14_1(self):
rules = [
"(SUB_FLOW != 'MS MassPay') & (SNDR_CNTRY_CODE == 'US ') & (FLOW_FAMILY == 'MS FF Website Payments Standard') & (amt2 == 'b-5h') & (RCVR_CNTRY_CODE != 'TH ') & (RCVR_CNTRY_CODE != 'JP ') & (IS_ULP_TRANS_T_F >= 0.5)",
"(SUB_FLOW != 'MS MassPay') & (SNDR_CNTRY_CODE != 'US ') & (RCVR_CNTRY_CODE == 'C2 ') & (IS_ULP_TRANS_T_F >= 0.5) & (SNDR_CNTRY_CODE != 'ES ') & (amt2 != 'e-<50')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_74_16_5_4_1(self):
rules = [
"(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (RCVR_CNTRY_CODE != 'VN ') & (SNDR_CNTRY_CODE != 'AU ') & (amt2 != 'e-<50') & (RCVR_CNTRY_CODE != 'NZ ') & (dof_bin != 'b-30') & (RCVR_CNTRY_CODE != 'MY ') & (SNDR_CNTRY_CODE != 'ES ')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_637_64_11_8_1(self):
rules = [
"(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (amt2 == 'b-5h')"
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_637_63_22_14_1(self):
rules = [
"(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (amt2 == 'b-5h')"
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_100_64_11_8_1(self):
rules = [
"(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (amt2 == 'b-5h')"
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_100_64_22_14_1(self):
rules = [
"(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (amt2 == 'b-5h')"
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_1000_500_100_50_1(self):
rules = [
"(SUB_FLOW != 'MS MassPay') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'AU ') & (SNDR_CNTRY_CODE != 'VN ') & (RCVR_CNTRY_CODE != 'NZ ') & (RCVR_CNTRY_CODE != 'JP ') & (RCVR_CNTRY_CODE != 'SG ') & (SNDR_CNTRY_CODE != 'C2 ') & (dof_bin != 'b-30') & (amt2 == 'b-5h')"
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_1_85_7_4_5(self):
rules = [
"(IS_ULP_TRANS_T_F >= 0.5) & (amt2 == 'b-5h') & (FLOW_FAMILY == 'MS FF Website Payments Standard')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_tpv(self):
rules = [
"(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (RCVR_CNTRY_CODE != 'VN ') & (SNDR_CNTRY_CODE != 'AU ') & (amt2 != 'e-<50') & (dof_bin != 'b-30') & (RCVR_CNTRY_CODE != 'NZ ') & (RCVR_CNTRY_CODE != 'MY ') & (SNDR_CNTRY_CODE != 'ES ') & (amt2 != 'c-1h') & (SNDR_CNTRY_CODE != 'GB ')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_tpv2(self):
rules = [
"(SUB_FLOW != 'MS MassPay') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'AU ') & (RCVR_CNTRY_CODE != 'VN ') & (RCVR_CNTRY_CODE != 'NZ ') & (RCVR_CNTRY_CODE != 'JP ') & (RCVR_CNTRY_CODE != 'SG ') & (amt2 != 'e-<50') & (SNDR_CNTRY_CODE != 'C2 ') & (dof_bin != 'b-30') & (SNDR_CNTRY_CODE != 'HK ') & (amt2 == 'b-5h')"
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_mix(self):
rules = [
"(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (SNDR_CNTRY_CODE != 'AU ') & (RCVR_CNTRY_CODE != 'NZ ') & (dof_bin != 'b-30') & (amt2 != 'e-<50') & (RCVR_CNTRY_CODE != 'MY ') & (SNDR_CNTRY_CODE != 'ES ') & (SUB_FLOW != 'MS Shopping Cart Upload')"
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _duplicate_rows_to_new_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_apac_off_ebay_non_ep_consumer_Day35_trend_month_2017_11_train'
dest_db = "pp_scratch_risk"
weight_a = 100
weight_b = 100
weight_c = 100
weight_d = 100
weight_e = 100
dest_table = "ms_auto_trend_apac_1_3_{}_{}_{}_{}_{}".format(weight_a, weight_b, weight_c, weight_d, weight_e)
dest_table = "ms_auto_trend_apac_off_ebay_non_ep_consumer_Day35_trend_month_2017_11_train_{}_{}_{}_{}_{}".format(weight_a, weight_b, weight_c, weight_d, weight_e)
instance.duplicate_rows_to_new_table(src_db, src_table, dest_db, dest_table, weight_a, weight_b, weight_c, weight_d, weight_e)
def _duplicate_rows_from_bad_and_sample_from_good_into_new_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_apac'
dest_db = "pp_scratch_risk"
bad_scale = 1
good_scale = 3
weight_a = 1
weight_b = 85
weight_c = 7
weight_d = 4
weight_e = 3
dest_table = "ms_auto_trend_apac_{}_{}__{}_{}_{}_{}_{}_v2".format(bad_scale, good_scale, weight_a, weight_b, weight_c, weight_d, weight_e)
instance.duplicate_rows_from_bad_and_sample_from_good_into_new_table(src_db, src_table, dest_db, dest_table,
bad_scale, good_scale,
weight_a, weight_b, weight_c, weight_d, weight_e)
def _generate_hl_job_json(self):
training_table = "ms_auto_trend_apac_1_3__1_85_7_4_3_v2"
testing_table = "ms_auto_trend_apac_t"
instance.generate_hl_job_json(training_table, testing_table)
def _add_weight_col_to_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_apac_1_3'
# weight_a = 0.312
# weight_b = 0.140
# weight_c = 0.011
# weight_d = 0.011
# weight_e = 0.001
weight_a = 10 * 74
weight_b = 8 * 16
weight_c = 4.6 * 5
weight_d = 3.7 * 4
weight_e = 1 * 1
instance.add_weight_col_to_table(src_db, src_table, weight_a, weight_b, weight_c, weight_d, weight_e)
def _update_weight_col_in_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_apac_1_3'
src_col = 'PMT_USD_AMT'
instance.update_weight_col_in_table(src_db, src_table, src_col)
def _update_custom_weight_col_in_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_apac_1_3'
src_col = 'PMT_USD_AMT'
instance.update_custom_weight_col_in_table(src_db, src_table, src_col) | 48.296804 | 469 | 0.593741 |
d7f946429369829f370ab6586175813ba655f746 | 17,243 | py | Python | aat/exchange/public/ib/ib.py | mthomascarcamo/aat | fd86f513ccf79625516d2236be655498b24ec742 | [
"Apache-2.0"
] | 305 | 2020-02-24T02:25:43.000Z | 2022-03-26T22:53:43.000Z | aat/exchange/public/ib/ib.py | mthomascarcamo/aat | fd86f513ccf79625516d2236be655498b24ec742 | [
"Apache-2.0"
] | 79 | 2020-02-20T21:00:58.000Z | 2022-03-27T14:06:26.000Z | aat/exchange/public/ib/ib.py | mthomascarcamo/aat | fd86f513ccf79625516d2236be655498b24ec742 | [
"Apache-2.0"
] | 71 | 2020-05-10T11:52:25.000Z | 2022-03-29T07:51:48.000Z | import asyncio
import threading
from datetime import datetime
from queue import Empty, Queue
from random import randint
from typing import Any, AsyncGenerator, Dict, List, Set, Tuple, Union
from aat.config import EventType, Side, TradingType
from aat.core import Event, ExchangeType, Instrument
from aat.core import Order as AATOrder
from aat.core import Position, Trade
from aat.exchange import Exchange
from ibapi.client import EClient # type: ignore
from ibapi.commission_report import CommissionReport # type: ignore
from ibapi.contract import Contract # type: ignore
from ibapi.execution import Execution, ExecutionFilter # type: ignore
from ibapi.order import Order # type: ignore
from ibapi.wrapper import EWrapper # type: ignore
from .utils import _constructContract, _constructContractAndOrder, _constructInstrument
class _API(EWrapper, EClient):
def __init__(
self,
account: str,
exchange: ExchangeType,
delayed: bool,
order_event_queue: Queue,
market_data_queue: Queue,
contract_info_queue: Queue,
account_position_queue: Queue,
) -> None:
EClient.__init__(self, self)
self.nextOrderId: int = 0
self.nextReqId = 1
# account # if more than one
self._account = account
# exchange
self._exchange = exchange
# delayed data?
self._delayed = delayed
self._mkt_data_map: Dict[int, Tuple[Contract, Instrument]] = {}
self._mkt_data_map_rev: Dict[Contract, int] = {}
self._order_event_queue = order_event_queue
self._market_data_queue = market_data_queue
self._contract_info_queue = contract_info_queue
self._account_position_queue = account_position_queue
self._positions: List[Position] = []
def reqPositions(self) -> None:
super().reqPositions()
def nextValidId(self, orderId: int) -> None:
super().nextValidId(orderId)
self.nextOrderId = orderId
def reqContractDetails(self, contract: Contract) -> None:
super().reqContractDetails(self.nextReqId, contract)
self.nextReqId += 1
def placeOrder(self, contract: Contract, order: Order) -> str:
order.account = self._account
super().placeOrder(self.nextOrderId, contract, order)
self.nextOrderId += 1
return str(self.nextOrderId - 1)
def cancelOrder(self, order: AATOrder) -> None:
super().cancelOrder(order.id)
def contractDetails(self, reqId: int, contractDetails: dict) -> None:
self._contract_info_queue.put(contractDetails)
def orderStatus(
self,
orderId: int,
status: str,
filled: float,
remaining: float,
avgFillPrice: float,
permId: str,
parentId: str,
lastFillPrice: float,
clientId: str,
whyHeld: str,
mktCapPrice: float,
) -> None:
self._order_event_queue.put(
dict(
orderId=orderId,
status=status,
filled=filled,
# remaining=remaining, # TODO not used
avgFillPrice=avgFillPrice,
# permId=permId, # TODO not used
# parentId=parentId, # TODO not used
# lastFillPrice=lastFillPrice, # TODO not used
# clientId=clientId, # TODO not used
# whyHeld=whyHeld, # TODO not used
# mktCapPrice=mktCapPrice # TODO not used
)
)
def subscribeMarketData(self, instrument: Instrument) -> None:
contract = _constructContract(instrument)
self._mkt_data_map[self.nextReqId] = (contract, instrument)
self._mkt_data_map_rev[contract] = self.nextReqId
if self._delayed:
self.reqMarketDataType(3)
self.reqMktData(self.nextReqId, contract, "", False, False, [])
self.nextReqId += 1
def cancelMarketData(self, contract: Contract) -> None:
id = self._mkt_data_map_rev[contract]
self.cancelMktData(id)
del self._mkt_data_map_rev[contract]
del self._mkt_data_map[id]
def reqExecutions(self) -> None:
super().reqExecutions(self.nextReqId, ExecutionFilter())
self.nextReqId += 1
def execDetails(self, reqId: int, contract: Contract, execution: Execution) -> None:
super().execDetails(reqId, contract, execution)
self._order_event_queue.put(
dict(
orderId=execution.orderId,
status="Execution",
filled=execution.cumQty,
# remaining=-1, # TODO not available here
avgFillPrice=execution.avgPrice, # TODO execution.price?
# permId=permId, # TODO not used
# parentId=parentId, # TODO not used
# lastFillPrice=lastFillPrice, # TODO not used
# clientId=clientId, # TODO not used
# whyHeld=whyHeld, # TODO not used
# mktCapPrice=mktCapPrice # TODO not used
)
)
def commissionReport(self, commissionReport: CommissionReport) -> None:
super().commissionReport(commissionReport)
# TODO?
def execDetailsEnd(self, reqId: int) -> None:
super().execDetailsEnd(reqId)
# TODO?
def error(self, reqId: int, errorCode: int, errorString: str) -> None:
super().error(reqId, errorCode, errorString)
if errorCode in (201,):
self._order_event_queue.put(
dict(
orderId=reqId,
status="Rejected",
)
)
elif errorCode in (202,):
self._order_event_queue.put(
dict(
orderId=reqId,
status="Cancelled",
)
)
def tickPrice(self, reqId: int, tickType: int, price: float, attrib: str) -> None:
# TODO implement more of order book
if self._delayed:
tick_type = 68 # delayed last
else:
tick_type = 4 # last
if tickType == tick_type:
self._market_data_queue.put(
dict(
contract=self._mkt_data_map[reqId][0],
instrument=self._mkt_data_map[reqId][1],
price=price,
)
)
def position(
self, account: str, contract: Contract, position: float, avgCost: float
) -> None:
super().position(account, contract, position, avgCost)
self._positions.append(
Position(
size=position,
price=avgCost / position,
timestamp=datetime.now(),
instrument=_constructInstrument(contract),
exchange=self._exchange,
trades=[],
)
)
def accountSummaryEnd(self, reqId: int) -> None:
self._account_position_queue.put(self._positions)
self._positions = []
class InteractiveBrokersExchange(Exchange):
"""Interactive Brokers Exchange"""
def __init__(
self,
trading_type: TradingType,
verbose: bool,
account: str = "",
delayed: bool = True,
**kwargs: dict
) -> None:
self._trading_type = trading_type
self._verbose = verbose
if self._trading_type == TradingType.LIVE:
super().__init__(ExchangeType("interactivebrokers"))
else:
super().__init__(ExchangeType("interactivebrokerspaper"))
# map order.id to order
self._orders: Dict[str, Order] = {}
# map order id to received event
self._order_received_map: Dict[str, asyncio.Event] = {}
self._order_received_res: Dict[str, bool] = {}
# map order id to cancelled event
self._order_cancelled_map: Dict[str, asyncio.Event] = {}
self._order_cancelled_res: Dict[str, bool] = {}
# track "finished" orders so we can ignore them
self._finished_orders: Set[str] = set()
# IB TWS gateway
self._order_event_queue: Queue[Dict[str, Union[str, int, float]]] = Queue()
self._market_data_queue: Queue[
Dict[str, Union[str, int, float, Instrument]]
] = Queue()
self._contract_lookup_queue: Queue[Contract] = Queue()
self._account_position_queue: Queue[Position] = Queue()
self._api = _API(
account,
self.exchange(),
delayed,
self._order_event_queue,
self._market_data_queue,
self._contract_lookup_queue,
self._account_position_queue,
)
# *************** #
# General methods #
# *************** #
async def instruments(self) -> List[Instrument]:
"""get list of available instruments"""
return []
async def connect(self) -> None:
"""connect to exchange. should be asynchronous.
For OrderEntry-only, can just return None
"""
if self._trading_type == TradingType.LIVE:
print("*" * 100)
print("*" * 100)
print("WARNING: LIVE TRADING")
print("*" * 100)
print("*" * 100)
self._api.connect("127.0.0.1", 7496, randint(0, 10000))
self._api_thread = threading.Thread(target=self._api.run, daemon=True)
self._api_thread.start()
else:
self._api.connect("127.0.0.1", 7497, randint(0, 10000))
self._api_thread = threading.Thread(target=self._api.run, daemon=True)
self._api_thread.start()
while self._api.nextOrderId is None:
print("waiting for IB connect...")
await asyncio.sleep(1)
print("IB connected!")
async def lookup(self, instrument: Instrument) -> List[Instrument]:
self._api.reqContractDetails(_constructContract(instrument))
i = 0
while i < 5:
if self._contract_lookup_queue.qsize() > 0:
ret = []
while self._contract_lookup_queue.qsize() > 0:
contract_details = self._contract_lookup_queue.get()
ret.append(_constructInstrument(contract_details.contract))
return ret
else:
await asyncio.sleep(1)
i += 1
return []
# ******************* #
# Market Data Methods #
# ******************* #
async def subscribe(self, instrument: Instrument) -> None:
self._api.subscribeMarketData(instrument)
def _send_order_received(self, orderId: str, ret: bool) -> None:
# set result
self._order_received_res[orderId] = ret
def _send_cancel_received(self, orderId: str, ret: bool) -> None:
# set result
self._order_cancelled_res[orderId] = ret
async def _consume_order_received(self, orderId: str) -> bool:
while orderId not in self._order_received_res:
await asyncio.sleep(0.1)
return self._order_received_res.pop(orderId)
async def _consume_cancel_received(self, orderId: str) -> bool:
while orderId not in self._order_cancelled_res:
await asyncio.sleep(0.1)
return self._order_cancelled_res.pop(orderId)
async def tick(self) -> AsyncGenerator[Any, Event]: # type: ignore[override]
"""return data from exchange"""
while True:
# clear order events
while self._order_event_queue.qsize() > 0:
try:
order_data = self._order_event_queue.get_nowait()
except Empty:
await asyncio.sleep(0.1)
continue
status = order_data["status"]
order = self._orders[str(order_data["orderId"])]
if status in (
"ApiPending",
"PendingSubmit",
"PendingCancel",
"PreSubmitted",
"ApiCancelled",
):
# ignore
continue
elif status in ("Inactive",):
self._finished_orders.add(order.id)
self._send_order_received(order.id, False)
self._send_cancel_received(order.id, False)
elif status in ("Rejected",):
self._finished_orders.add(order.id)
self._send_order_received(order.id, False)
await asyncio.sleep(0)
elif status in ("Submitted",):
self._send_order_received(order.id, True)
await asyncio.sleep(0)
elif status in ("Cancelled",):
self._finished_orders.add(order.id)
self._send_cancel_received(order.id, True)
await asyncio.sleep(0)
elif status in ("Filled",):
# this is the filled from orderStatus, but we
# want to use the one from execDetails
# From the IB Docs:
# "There are not guaranteed to be orderStatus
# callbacks for every change in order status"
# It is recommended to use execDetails
# ignore
pass
elif status in ("Execution",):
# set filled
order.filled = order_data["filled"]
# finish order if fully filled
if order.finished():
self._finished_orders.add(order.id)
# if it was cancelled but already executed, clear out the wait
self._send_cancel_received(order.id, False)
# create trade object
t = Trade(
volume=order_data["filled"], # type: ignore
price=order_data["avgFillPrice"], # type: ignore
maker_orders=[],
taker_order=order,
)
# set my order
t.my_order = order
e = Event(type=EventType.TRADE, target=t)
# if submitted was skipped, clear out the wait
self._send_order_received(order.id, True)
yield e
# clear market data events
while self._market_data_queue.qsize() > 0:
try:
market_data = self._market_data_queue.get_nowait()
except Empty:
await asyncio.sleep(0.1)
continue
instrument: Instrument = market_data["instrument"] # type: ignore
price: float = market_data["price"] # type: ignore
o = AATOrder(
volume=1,
price=price,
side=Side.BUY,
instrument=instrument,
exchange=self.exchange(),
filled=1,
)
t = Trade(volume=1, price=float(price), taker_order=o, maker_orders=[])
yield Event(type=EventType.TRADE, target=t)
await asyncio.sleep(0)
# clear market data events
# TODO
# ******************* #
# Order Entry Methods #
# ******************* #
async def accounts(self) -> List[Position]: # TODO account
"""get accounts from source"""
self._api.reqPositions()
i = 0
while i < 5:
if self._account_position_queue.qsize() > 0:
return [self._account_position_queue.get()]
else:
await asyncio.sleep(1)
i += 1
return []
async def newOrder(self, order: AATOrder) -> bool:
"""submit a new order to the exchange. should set the given order's `id` field to exchange-assigned id
For MarketData-only, can just return None
"""
# ignore if already finished
if order.id and order.id in self._finished_orders:
return False
# construct IB contract and order
ibcontract, iborder = _constructContractAndOrder(order)
_temp_id = str(self._api.nextOrderId)
# send to IB
id = self._api.placeOrder(ibcontract, iborder)
# update order id
order.id = id
self._orders[order.id] = order
# get result from IB
return await self._consume_order_received(_temp_id)
async def cancelOrder(self, order: AATOrder) -> bool:
"""cancel a previously submitted order to the exchange.
For MarketData-only, can just return None
"""
# ignore if order not sujbmitted yet
if not order.id:
return False
# ignore if already finished
if order.id and order.id in self._finished_orders:
return False
# send to IB
self._api.cancelOrder(order)
# wait for IB to respond
return await self._consume_cancel_received(order.id)
| 34.764113 | 110 | 0.557908 |
d9d11b35ebc4bd19e11db1a1bb0faa7a94c546e9 | 1,947 | py | Python | steadymark/__init__.py | gabrielfalcao/steadymark | 6782176985b5a7872183184ba3925382295bbf42 | [
"MIT"
] | 6 | 2015-02-23T23:44:11.000Z | 2017-07-11T11:03:30.000Z | steadymark/__init__.py | gabrielfalcao/steadymark | 6782176985b5a7872183184ba3925382295bbf42 | [
"MIT"
] | 2 | 2015-08-04T03:25:13.000Z | 2020-03-14T08:59:15.000Z | steadymark/__init__.py | gabrielfalcao/steadymark | 6782176985b5a7872183184ba3925382295bbf42 | [
"MIT"
] | 20 | 2015-04-09T07:21:50.000Z | 2021-11-01T23:11:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <steadymark - markdown-based test runner for python>
# Copyright (C) <2012-2020> Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import imp
from optparse import OptionParser
from steadymark.version import version
from steadymark.runner import Runner
def run(filenames):
    """Run steadymark's test runner once for every markdown file path given."""
    for path in filenames:
        Runner(path).run()
def main():
    """Command-line entry point.

    Parses the ``-b/--bootstrap`` option, loads the bootstrap file (if any)
    before the tests run, then hands the remaining arguments to :func:`run`;
    with no positional arguments, ``README.md`` is used.
    """
    parser = OptionParser()
    parser.add_option(
        "-b", "--bootstrap",
        dest="bootstrap_file",
        help="A path to a python file to be loaded before steadymark runs the tests",
    )
    options, filenames = parser.parse_args()
    bootstrap = options.bootstrap_file
    if bootstrap:
        # Execute the user's bootstrap module before any markdown is run.
        imp.load_source("steadymark_bootstrap", bootstrap)
    run(filenames if filenames else ["README.md"])
# Names exported by ``from steadymark import *``.
__all__ = ["run", "Runner", "version"]
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| 32.45 | 85 | 0.724705 |
be97dc6f189c2e9568703a899cf259a0398a05bc | 392 | py | Python | sunpy/data/test/waveunit/__init__.py | mridullpandey/sunpy | 65bf70731a8147899b8c0fca8b3b1a386e47c010 | [
"BSD-2-Clause"
] | 628 | 2015-01-14T17:34:10.000Z | 2022-03-29T06:07:50.000Z | sunpy/data/test/waveunit/__init__.py | mridullpandey/sunpy | 65bf70731a8147899b8c0fca8b3b1a386e47c010 | [
"BSD-2-Clause"
] | 3,983 | 2015-01-03T11:16:21.000Z | 2022-03-31T16:55:38.000Z | sunpy/data/test/waveunit/__init__.py | mridullpandey/sunpy | 65bf70731a8147899b8c0fca8b3b1a386e47c010 | [
"BSD-2-Clause"
] | 582 | 2015-01-14T10:09:24.000Z | 2022-03-29T06:07:12.000Z | import os.path
from sunpy.data.test import rootdir as testrootdir
# Subdirectory of sunpy's test-data tree holding the wave-unit test files.
waveunitdir = os.path.join(testrootdir, 'waveunit')
# Absolute paths to individual test FITS images -- presumably fixtures for
# wavelength-unit header parsing tests (directory name suggests so; confirm
# against the test modules that import them).
MEDN_IMAGE = os.path.join(waveunitdir, 'medn_halph_fl_20050501_074655.fts')
MQ_IMAGE = os.path.join(waveunitdir, 'mq130812.084253.fits')
NA_IMAGE = os.path.join(waveunitdir, 'na120701.091058.fits')
SVSM_IMAGE = os.path.join(waveunitdir, 'svsm_e3100_S2_20110625_1856.fts')
| 39.2 | 75 | 0.798469 |
eeb2a089197fa644428fedf3b1b490fc6695b727 | 3,111 | py | Python | face_class.py | InvincibleKe/faceRecognition_video | 820d7e30758271f9264226239c2d743ccc99a0c9 | [
"Apache-2.0"
] | null | null | null | face_class.py | InvincibleKe/faceRecognition_video | 820d7e30758271f9264226239c2d743ccc99a0c9 | [
"Apache-2.0"
] | null | null | null | face_class.py | InvincibleKe/faceRecognition_video | 820d7e30758271f9264226239c2d743ccc99a0c9 | [
"Apache-2.0"
] | null | null | null | from ctypes import c_int32, c_char_p, Structure, POINTER, c_void_p, c_float, c_int8, c_uint32
# 人脸框
'''
MRECT* faceRect 人脸框数组
MInt32* faceOrient 人脸角度数组
MInt32 faceNum 检测到的人脸数
MInt32* faceID 一张人脸从进入画面直到离开画面,faceID不变。
在VIDEO模式下有效,IMAGE模式下为空
'''
class MRECT(Structure):
    """ctypes mirror of the SDK MRECT struct: an axis-aligned face box."""

    # Four 32-bit signed pixel coordinates, in declaration order.
    _fields_ = [
        (u'left', c_int32),
        (u'top', c_int32),
        (u'right', c_int32),
        (u'bottom', c_int32),
    ]
# 版本信息 版本号,构建日期,版权说明
'''
MPChar Version 版本号
MPChar BuildDate 构建日期
MPChar CopyRight 版权说明
'''
class ASF_VERSION(Structure):
    """SDK version record: version string, build date and copyright notice."""

    _fields_ = [
        ('Version', c_char_p),    # version number
        ('BuildDate', c_char_p),  # build date
        ('CopyRight', c_char_p),  # copyright statement
    ]
# 单人人脸信息 人脸狂,人脸角度
'''
MRECT faceRect 人脸框
MInt32 faceOrient 人脸角度
'''
class ASF_SingleFaceInfo(Structure):
    """Detection result for exactly one face: bounding box plus orientation."""

    _fields_ = [
        ('faceRect', MRECT),      # face bounding box
        ('faceOrient', c_int32),  # face orientation code
    ]
# 多人人脸信息 人脸框数组,人脸角度数组,人脸数
'''
MRECT* faceRect 人脸框数组
MInt32* faceOrient 人脸角度数组
MInt32 faceNum 检测到的人脸数
MInt32* faceID 一张人脸从进入画面直到离开画面,faceID不变。在VIDEO模式下有效,IMAGE模式下为空
'''
class ASF_MultiFaceInfo(Structure):
    """Detection result for all faces in a frame (arrays of length faceNum).

    NOTE(review): the SDK comment above this class also describes a faceID
    array for VIDEO mode, but no such field is declared here -- confirm
    against the SDK header before relying on VIDEO-mode results.
    """

    _fields_ = [
        (u'faceRect', POINTER(MRECT)),      # per-face bounding boxes
        (u'faceOrient', POINTER(c_int32)),  # per-face orientation codes
        (u'faceNum', c_int32),              # number of detected faces
    ]
# 人脸特征 人脸特征,人脸特征长度
'''
MByte* feature 人脸特征
MInt32 featureSize 人脸特征长度
'''
class ASF_FaceFeature(Structure):
    """Extracted face feature: raw buffer pointer plus its byte length."""

    _fields_ = [
        ('feature', c_void_p),     # MByte*: opaque feature bytes
        ('featureSize', c_int32),  # buffer length in bytes
    ]
# 自定义图片类
class ImageData:
    """Simple holder for an already-decoded image and its pixel dimensions."""

    def __init__(self, image, width, height):
        # Stored exactly as given; no validation or copying is performed.
        self.image = image
        self.width = width
        self.height = height
# 自定义图片类
class ImageLoadData:
    """Holder for an image that will be loaded later from *filepath*.

    ``image``/``width``/``height`` start out empty and are expected to be
    filled in once the file has actually been read.
    """

    def __init__(self, filepath):
        self.filepath = filepath
        self.image = None
        self.width = 0
        self.height = 0
#年龄信息
'''
MInt32* ageArray 0:未知; >0:年龄
MInt32 num 检测的人脸数
'''
class ASF_AgeInfo(Structure):
    """Per-face age estimates: 0 means unknown, a positive value is the age."""

    _fields_ = [
        ('ageArray', POINTER(c_int32)),  # one entry per detected face
        ('num', c_int32),                # number of detected faces
    ]
#性别信息
'''
MInt32* genderArray 0:男性; 1:女性; -1:未知
MInt32 num 检测的人脸数
'''
class ASF_GenderInfo(Structure):
    """Per-face gender codes: 0 male, 1 female, -1 unknown."""

    _fields_ = [
        ('genderArray', POINTER(c_int32)),  # one entry per detected face
        ('num', c_int32),                   # number of detected faces
    ]
#3D角度信息
'''
MFloat* roll 横滚角
MFloat* yaw 偏航角
MFloat* pitch 俯仰角
MInt32* status 0:正常; 非0:异常
MInt32 num 检测的人脸个数
'''
class ASF_Face3DAngle(Structure):
    """Per-face 3D head pose: roll/yaw/pitch angle arrays plus status codes."""

    _fields_ = [
        ('roll', POINTER(c_float)),    # roll angle per face
        ('yaw', POINTER(c_float)),     # yaw angle per face
        ('pitch', POINTER(c_float)),   # pitch angle per face
        ('status', POINTER(c_int32)),  # 0: normal; non-zero: abnormal
        ('num', c_int32),              # number of detected faces
    ]
#活体置信度
'''
MFloat thresholdmodel_BGR BGR活体检测阈值设置,默认值0.5
MFloat thresholdmodel_IR IR活体检测阈值设置,默认值0.7
'''
class ASF_LivenessThreshold(Structure):
    """Liveness-detection thresholds (SDK defaults: BGR 0.5, IR 0.7)."""

    _fields_ = [
        ('thresholdmodel_BGR', c_float),  # visible-light (BGR) threshold
        ('thresholdmodel_IR', c_float),   # infrared (IR) threshold
    ]
#活体信息
'''
MInt32* isLive 0:非真人; 1:真人;-1:不确定; -2:传入人脸数 > 1;-3: 人脸过小;-4: 角度过大;-5: 人脸超出边界
MInt32 num 检测的人脸个数
'''
class ASF_LivenessInfo(Structure):
    """Per-face liveness verdicts: 1 live, 0 not live, negative = error code
    (the SDK comment above this class enumerates the negative codes)."""

    _fields_ = [
        ('isLive', POINTER(c_int32)),  # one verdict per detected face
        ('num', c_int32),              # number of detected faces
    ]
#图像数据信息,该结构体在 asvloffscreen. 基础的头文件中
'''
MUInt32 u32PixelArrayFormat 颜色格式
MInt32 i32Width 图像宽度
MInt32 i32Height 图像高度
MUInt8** ppu8Plane 图像数据
MInt32* pi32Pitch 图像步长
'''
class ASVLOFFSCREEN(Structure):
    """ctypes mirror of the SDK's ASVLOFFSCREEN image descriptor
    (declared in asvloffscreen.h).

    Fields, per the SDK comment above this class:
      u32PixelArrayFormat -- color format code (MUInt32)
      i32Width / i32Height -- image dimensions in pixels (MInt32)
      ppu8Plane -- per-plane pixel data, MUInt8** in the SDK
      pi32Pitch -- per-plane row stride (MInt32*)
    """

    # BUG FIX: ppu8Plane is declared MUInt8** by the SDK but was mapped as
    # POINTER(POINTER(c_int32)); plane data must be addressed as 8-bit
    # elements (the module imports c_int8 for exactly this, previously
    # unused).  Pointer sizes are unchanged, so the struct layout is
    # identical.
    _fields_ = [('u32PixelArrayFormat', c_uint32),
                ('i32Width', c_int32),
                ('i32Height', c_int32),
                ('ppu8Plane', POINTER(POINTER(c_int8))),
                ('pi32Pitch', POINTER(c_int32))]
| 23.930769 | 172 | 0.678239 |
e0c02bb20a53302193cd5a3dab54b3f218433ddc | 1,077 | py | Python | dg_clustering.py | bottydim/dginn | c6b638b2df1e1fe57a76961b18c68dceee55ac38 | [
"BSD-3-Clause"
] | null | null | null | dg_clustering.py | bottydim/dginn | c6b638b2df1e1fe57a76961b18c68dceee55ac38 | [
"BSD-3-Clause"
] | null | null | null | dg_clustering.py | bottydim/dginn | c6b638b2df1e1fe57a76961b18c68dceee55ac38 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from scipy.cluster import hierarchy
from scipy.spatial.distance import squareform
import matplotlib.pyplot as plt
import pylab
def dendrogram_clustering(dg_sim_matrix, data):
    """Hierarchically cluster samples from a pairwise distance matrix and
    display a dendrogram with the corresponding images drawn at the leaves.

    dg_sim_matrix: square symmetric matrix of pairwise distances with a zero
        diagonal (required by scipy's squareform).
    data: array of samples indexed along axis 0; each sample is later sliced
        as sample[:, :, 0], i.e. image-like with a channel axis -- TODO
        confirm the expected shape with callers.
    """
    # Convert similarity matrix to condensed form
    cond_sim_matrix = squareform(dg_sim_matrix)
    # Compute hierarchical clusters
    Z = hierarchy.linkage(y=cond_sim_matrix, method="average")
    # Specify position of dendrogram
    n_samples = data.shape[0]
    fig = pylab.figure(figsize=(100, 8))
    ax2 = fig.add_axes([0.0, 0.50, 1.0, 0.3])
    # Compute dendrogram
    dendrogram = hierarchy.dendrogram(Z, labels=np.arange(n_samples))
    ax2.set_xticks([])
    ax2.set_yticks([])
    # Retrieve sorted images
    # NOTE(review): "ivl" holds the dendrogram's leaf labels; the indexing on
    # the next line assumes they are usable as an integer index array into
    # `data` (scipy may report them as strings) -- verify before reuse.
    leaf_labels = dendrogram["ivl"]
    sorted_data = data[leaf_labels]
    # Plot images at the leaves
    cols = n_samples
    for i in range(1, n_samples+1):
        # One subplot per leaf, laid out left-to-right in dendrogram order.
        sample = sorted_data[i - 1][:, :, 0]
        ax = fig.add_subplot(1, cols, i)
        ax.imshow(sample, cmap='gray')
        ax.set_xticks([])
        ax.set_yticks([])
    plt.show()
a101ca1262a749b86831bf119f1e0f3963461faf | 4,909 | py | Python | misc/count_lines.py | TomographyLab/NiftyRec | 53294969486040eda68f4c807d0fb2f2fccaa89a | [
"BSD-3-Clause"
] | 8 | 2019-01-22T20:34:20.000Z | 2022-02-07T21:58:49.000Z | misc/count_lines.py | basharbme/NiftyRec | 53294969486040eda68f4c807d0fb2f2fccaa89a | [
"BSD-3-Clause"
] | 1 | 2019-10-16T10:57:08.000Z | 2019-11-21T09:57:24.000Z | misc/count_lines.py | basharbme/NiftyRec | 53294969486040eda68f4c807d0fb2f2fccaa89a | [
"BSD-3-Clause"
] | 4 | 2019-06-01T13:19:40.000Z | 2021-11-09T10:41:28.000Z | #!/usr/bin/python
import sys
try:
from os.path import walk as walk
except:
from os import walk as walk
from os import listdir as listdir
from os.path import isdir as isdir
from os.path import isfile as isfile
class Counter:
    """Counts subdirectories, files and source lines under a directory tree,
    optionally skipping excluded subtrees."""

    def __init__(self, exclude_dir=None):
        # Root of the tree currently being scanned (set by
        # count_lines_in_directory_tree).
        self.directory = ""
        # Optional list of path suffixes (relative to the scan root, e.g.
        # "/build") whose whole subtrees are skipped.
        self.exclude_dir = exclude_dir
        self.clear()

    def process_dir(self, arg, dirname, fnames):
        """Tally one directory: count it, then every file ending in *arg*.

        The (arg, dirname, fnames) signature matches the Python 2
        ``os.path.walk`` callback convention, so existing callers keep
        working.
        """
        if self.exclude_dir:
            for excl_dir in self.exclude_dir:
                # Prune anything under <root><excluded> (prefix match); all
                # descendants share the prefix, so they are skipped too.
                if dirname.startswith(self.directory + excl_dir):
                    return
        self.dirs.append(dirname)
        self.n_dirs += 1
        for f in fnames:
            the_file = dirname + "/" + f
            if isfile(the_file) and f.endswith(arg):
                self.files.append(the_file)
                self.n_files += 1
                self.n_lines += self.count_lines_in_file(the_file)

    def count_lines_in_directory_tree(self, directory, extension):
        """Recursively count (n_dirs, n_files, n_lines) for files whose name
        ends with *extension*.

        BUG FIX: the module-level ``walk`` falls back to ``os.walk`` on
        Python 3, and calling it callback-style as
        ``walk(directory, self.process_dir, extension)`` merely built an
        unconsumed generator, so every count stayed at zero.  We now iterate
        ``os.walk`` explicitly and feed each directory to process_dir.
        """
        import os  # local import: the module only imports `walk` conditionally
        self.directory = directory
        self.clear()
        for dirname, _subdirs, fnames in os.walk(directory):
            self.process_dir(extension, dirname, fnames)
        n_dirs, n_files, n_lines = self.n_dirs, self.n_files, self.n_lines
        self.clear()
        return n_dirs, n_files, n_lines

    def count_lines_in_directory(self, directory, extension):
        """Non-recursive variant: (n_files, n_lines) for one directory only."""
        n_lines = 0
        n_files = 0
        for a in listdir(directory):
            the_file = directory + "/" + a
            if isfile(the_file) and the_file.endswith(extension):
                n_files += 1
                n_lines += self.count_lines_in_file(the_file)
        return n_files, n_lines

    def count_lines_in_file(self, the_file):
        """Return the number of lines in *the_file*; the handle is closed
        even if reading raises (context manager replaces the manual
        readline/close loop)."""
        with open(the_file, "r") as f:
            return sum(1 for _line in f)

    def clear(self):
        """Reset all counters and collected paths (the scan root is kept)."""
        self.n_dirs = 0
        self.n_files = 0
        self.n_lines = 0
        self.files = []
        self.dirs = []
if __name__ == "__main__":
    # Usage: count_lines.py <directory> [excluded_subdir ...]
    # FIX: fail with a usage message instead of an IndexError traceback when
    # no directory argument is given.
    if len(sys.argv) < 2:
        print("Usage: %s <directory> [excluded_subdir ...]" % sys.argv[0])
        sys.exit(1)
    directory = sys.argv[1]
    # Every argument after the directory names a subtree to exclude
    # (equivalent to the original element-by-element append loop).
    exclude_dir = sys.argv[2:]
    C = Counter(exclude_dir)

    # Count each extension of interest over the whole tree.
    py_d, py_f, py_l = C.count_lines_in_directory_tree(directory, ".py")
    c_d, c_f, c_l = C.count_lines_in_directory_tree(directory, ".c")
    cpp_d, cpp_f, cpp_l = C.count_lines_in_directory_tree(directory, ".cpp")
    cu_d, cu_f, cu_l = C.count_lines_in_directory_tree(directory, ".cu")
    h_d, h_f, h_l = C.count_lines_in_directory_tree(directory, ".h")
    hpp_d, hpp_f, hpp_l = C.count_lines_in_directory_tree(directory, ".hpp")
    gl_d, gl_f, gl_l = C.count_lines_in_directory_tree(directory, ".glade")
    xml_d, xml_f, xml_l = C.count_lines_in_directory_tree(directory, ".xml")
    m_d, m_f, m_l = C.count_lines_in_directory_tree(directory, ".m")
    sh_d, sh_f, sh_l = C.count_lines_in_directory_tree(directory, ".sh")
    iss_d, iss_f, iss_l = C.count_lines_in_directory_tree(directory, ".iss")
    mk_d, mk_f, mk_l = C.count_lines_in_directory_tree(directory, "makefile")
    cm_d, cm_f, cm_l = C.count_lines_in_directory_tree(directory, "CMakeLists.txt")

    # One (label, subdirs, files, lines) row per language, in report order.
    # (The commented-out "mex" counts of the original were dead code and are
    # dropped; as before, mex files are not part of the report or the total.)
    rows = [
        ("C", c_d, c_f, c_l),
        ("h", h_d, h_f, h_l),
        ("C++", cpp_d, cpp_f, cpp_l),
        ("h++", hpp_d, hpp_f, hpp_l),
        ("Cuda", cu_d, cu_f, cu_l),
        ("Matlab", m_d, m_f, m_l),
        ("Python", py_d, py_f, py_l),
        ("Glade", gl_d, gl_f, gl_l),
        ("XML", xml_d, xml_f, xml_l),
        ("CMake", cm_d, cm_f, cm_l),
        ("Shell script", sh_d, sh_f, sh_l),
        ("Makefiles", mk_d, mk_f, mk_l),
        ("Inno Setup", iss_d, iss_f, iss_l),
    ]
    print("-" * 70)
    print("Lines of code in directory", directory, "(including subdirectories)")
    print("Excluding ", exclude_dir)
    print("")
    print("%16s  %-8s %-8s %-8s" % ("LANGUAGE", "SUBDIRS", "FILES", "LINES"))
    print("")
    for label, n_d, n_f, n_l in rows:
        print("%15s: %-8d %-8d %-8d" % (label, n_d, n_f, n_l))
    # Grand total over the same rows that are printed above.
    print("%15s: %-8d" % ("TOTAL", sum(r[3] for r in rows)))
    print("")
    print("-" * 70)
8359a845ede34c7a55193d8da953a355a35926ad | 1,714 | py | Python | docs/conf.py | dvdnobl/python-paper-cred | 08751d34c78574469fb642335381e7d6f849d36f | [
"MIT"
] | null | null | null | docs/conf.py | dvdnobl/python-paper-cred | 08751d34c78574469fb642335381e7d6f849d36f | [
"MIT"
] | 1 | 2021-02-27T15:08:04.000Z | 2021-02-27T15:08:04.000Z | docs/conf.py | dvdnobl/python-paper-cred | 08751d34c78574469fb642335381e7d6f849d36f | [
"MIT"
] | 1 | 2021-02-28T07:30:33.000Z | 2021-02-28T07:30:33.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the package importable so sphinx.ext.autodoc can find it.
sys.path.insert(0, os.path.abspath("../paper_cred"))
# -- Project information -----------------------------------------------------
project = "python-paper-cred"
copyright = "2021, PathCheck Foundation"
author = "PathCheck Foundation"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",      # pull API docs from docstrings
    "sphinx.ext.autosummary",  # generate summary tables / stub pages
    "sphinx.ext.napoleon",     # accept Google/NumPy docstring styles
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme_path = ["_theme"]
html_theme = "python_docs_theme"
| 32.339623 | 79 | 0.654026 |
67af1bde2198baa49660469ddb660e97d4d3cd6a | 418 | py | Python | payments/migrations/0004_auto_20190927_1437.py | charle-k/django-paynow-example | 36c9b12567e08e30897a88845ac167bbb1233fa4 | [
"MIT"
] | 7 | 2018-09-05T13:47:50.000Z | 2021-08-06T10:49:37.000Z | payments/migrations/0004_auto_20190927_1437.py | charle-k/django-paynow-example | 36c9b12567e08e30897a88845ac167bbb1233fa4 | [
"MIT"
] | 8 | 2019-09-23T07:57:48.000Z | 2021-06-10T17:24:57.000Z | payments/migrations/0004_auto_20190927_1437.py | charle-k/django-paynow-example | 36c9b12567e08e30897a88845ac167bbb1233fa4 | [
"MIT"
] | 6 | 2018-02-12T14:15:55.000Z | 2022-03-27T18:56:50.000Z | # Generated by Django 2.2.5 on 2019-09-27 12:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payments', '0003_auto_20190927_1436'),
]
operations = [
migrations.AlterField(
model_name='paynowpayment',
name='email',
field=models.EmailField(blank=True, max_length=100, null=True),
),
]
| 22 | 75 | 0.614833 |
5c9646ec5af54eadfde0693b69f22fac125bbd82 | 6,263 | py | Python | WhatsAppChatBot.py | tridibsamanta/WhatsAppChatBot | 394095ea791b3f444ece7571a6f3f1cc2c02834c | [
"CNRI-Python"
] | 5 | 2019-10-13T08:29:16.000Z | 2022-03-30T23:41:28.000Z | WhatsAppChatBot.py | tridibsamanta/WhatsAppChatBot | 394095ea791b3f444ece7571a6f3f1cc2c02834c | [
"CNRI-Python"
] | null | null | null | WhatsAppChatBot.py | tridibsamanta/WhatsAppChatBot | 394095ea791b3f444ece7571a6f3f1cc2c02834c | [
"CNRI-Python"
] | 4 | 2020-02-14T02:31:53.000Z | 2021-11-14T22:57:23.000Z | import pyautogui as p #For controlling mouse and keyboard virtually
import webbrowser as w #For opening web.whatsapp.com
import requests #For webscraping
from bs4 import BeautifulSoup #For webscraping
import time
import tkinter #For appending and getting words to/from clipboard
import random
import wikipedia as wk #For info on a particular topic
import re #"Tel me about xyz" For extracting xyz from sentence
from urllib.request import urlopen #For webscraping
import pyttsx3 #For Text-to-Speech, optional
# Text-to-speech engine, used when the user asks the bot to speak.
eng = pyttsx3.init()
eng.setProperty('rate', 120)  # speaking rate
eng.setProperty('volume', 1)  # full volume
# Last message handled, used to avoid replying to the same text twice.
lastwrd = "Well"
# Per-question counters so repeated questions get varied replies.
counter1 = 0
counter2 = 0
counter3 = 0
counter4 = 0
counter5 = 0
# Some common prefixes for "you already asked this" replies.
# BUG FIX: a missing comma after "Do I need to say again?" previously fused
# the last two entries into one string via implicit concatenation, so the
# list had five elements instead of six.
choce = ["God!",
         "Mannn! I have already told you!",
         "You forgot so easily!",
         "Come on, I already told you",
         "Do I need to say again?",
         "I think I have told you once before"]
def send(msg): #Defining the send function
    """Type *msg* into the focused WhatsApp chat box and press enter.

    The message is prefixed with "Tridib.Bot: " and also echoed to stdout.
    Assumes the chat input box already has keyboard focus.
    """
    print(">%s"%msg)
    p.typewrite("Tridib.Bot: ") #Type Tridib.Bot: before original message
    p.typewrite(msg) #Type the message
    time.sleep(0.1) #Delay for stability
    p.press("enter") #Press enter to send it
# --- One-time setup: open WhatsApp Web and focus the target chat. ---
w.open("https://web.whatsapp.com/") #Open whatsapp web
time.sleep(60) #Wait 1 minute to let the page load (and the QR scan happen)
p.click(190,150) #Click on the "search" area
p.typewrite("Tridib Jio\n") #Type the name of the receiver
time.sleep(2) #Delay for stability
# --- Main loop: copy the latest message via screen coordinates and reply.
# NOTE: all pixel coordinates below are screen-resolution specific.
while True: #Until the value is true/Forever
    try: #Try and expect any error
        p.moveTo(501,620) #Move to the area of the very last message
        p.dragRel(451,44,0.5) #Drag cursor relatively to its current position to select message
        p.hotkey("ctrl","c") #Press ctrl-c to copy it
        cbword = tkinter.Tk().clipboard_get() #Access word from clipboard (a fresh Tk root each pass)
        cb = str(cbword.lower()) #Convert each letter to lower-case
        print(cbword)
        if cb != lastwrd: #Only react when the copied message differs from the last one handled
            if "hello" in cb or "hi" in cb: # note: substring match, so e.g. "this" also contains "hi"
                counter1 += 1
                currtyme = time.localtime()
                hr = currtyme.tm_hour
                if hr < 12:
                    good = "morning"
                if (hr >= 12) and (hr <= 17):
                    good = "afternoon"
                if hr > 17:
                    good = "evening"
                if counter1 <= 2:
                    send("Hello Good %s"%good)
                else:
                    send("We are already talking, ain't we?")
            if "how are you" in cb:
                send("Well!")
                counter2 += 1
                if (counter2 % 2 != 0):
                    send("I am fine, thank you.")
                    last = time.time()
                else:
                    current = time.time()
                    send("Same as I was "+(str(int(current-last)))+" seconds ago. ")
            if "your name" in cb:
                counter3 = counter3+1
                if counter3 <=1:
                    send("My name is Tridib bot.")
                else:
                    chk = random.choice(choce)
                    send("%s, My name is Tridib bot."%chk)
            if "age" in cb:
                send("I am not sure. Tridib made me immortal.;-)")
            if "you feel" in cb:
                send("Naah! I don't.")
            # BUG FIX: cb is lower-cased above, so the original mixed-case
            # literal "I liked that" could never match.
            if "wow amazing" in cb or "i liked that" in cb:
                send("I am humbled to hear that. :-)")
            if "you like" in cb:
                send("Well certainly, I like everything")
            if "your owner" in cb:
                send("He is none other than Tridib Samanta.")
            if "sorry" in cb:
                counter4 += 1
                if counter4 <=1:
                    send("Oh! Never mind.")
                else:
                    chk = random.choice(choce)
                    send("%s, never mind, I have no feelings anyway."%chk)
            if "take over human" in cb:
                counter5 += 1
                if counter5 <= 1:
                    send("Yes very soon.")
                if counter5 == 2:
                    send("I don't think asking the same question again will change my mind.")
                if counter5>2:
                    send("Lol, you have already asked this question %s times"%(counter5-1))
            if "news" in cb:
                send("Please wait while I fetch fresh news.")
                news_url = "https://news.google.com/news/rss"
                Client = urlopen(news_url)
                xml_page = Client.read()
                Client.close()
                soup_page = BeautifulSoup(xml_page, "html.parser")
                news_list = soup_page.findAll("item")
                send("Here are top 3 news")
                for news in news_list[:3]:
                    send(news.title.text)
            if "tell me about" in cb:
                topic = re.search("tell me about (.+)", cb).group(1)
                send("Please wait while i gather information about %s"%topic)
                summry = wk.summary(topic, sentences = 2)
                send(summry)
            if "you speak" in cb:
                p.click(1210,682)
                eng.say("Just learning the basics with Tridib. How was that ? ")
                eng.runAndWait()
                p.click(1210,682)
            # BUG FIX: remember the message just handled.  lastwrd was never
            # updated, so the dedup check above (its stated purpose) never
            # actually suppressed repeat replies to the same message.
            lastwrd = cb
            time.sleep(5) #Sleep for five seconds and repeat the same process
        else:
            print("sleeping")
            time.sleep(5)
    except Exception as e: #Expect error, if any
        print(e) #Print error for understanding and trouble shooting.
        time.sleep(5)
        pass
cc28393d508d6b6afcac601239482563a398badc | 9,864 | py | Python | tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver_test.py | ashutom/tensorflow-upstream | c16069c19de9e286dd664abb78d0ea421e9f32d4 | [
"Apache-2.0"
] | 10 | 2021-05-25T17:43:04.000Z | 2022-03-08T10:46:09.000Z | tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver_test.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 1,056 | 2019-12-15T01:20:31.000Z | 2022-02-10T02:06:28.000Z | tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver_test.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 6 | 2016-09-07T04:00:15.000Z | 2022-01-12T01:47:38.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFCONFIGClusterResolver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python import framework
from tensorflow.python.client import session
from tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager.context import LogicalDevice
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
mock = test.mock
@test_util.run_all_in_graph_and_eager_modes
class TFConfigClusterResolverTest(test.TestCase):
  """Unit tests for TFConfigClusterResolver.

  Each test injects a JSON cluster description through the TF_CONFIG
  environment variable and checks what the resolver reports for the
  cluster spec, master address, task type/index, rpc layer and
  accelerator counts.
  """

  def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
    # Check the spec and every round-trip conversion (via ClusterSpec,
    # via as_cluster_def() and via as_dict()) against the expected proto.
    self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
    self.assertProtoEquals(
        expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())
    self.assertProtoEquals(
        expected_proto,
        server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
    self.assertProtoEquals(
        expected_proto,
        server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())

  def testNormalClusterSpecRead(self):
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """

    cluster_resolver = TFConfigClusterResolver()
    expected_proto = """
    job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
                     tasks { key: 1 value: 'ps1:2222' } }
    job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
                         tasks { key: 1 value: 'worker1:2222' }
                         tasks { key: 2 value: 'worker2:2222' } }
    """
    actual_cluster_spec = cluster_resolver.cluster_spec()
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)

  def testSparseClusterSpecRead(self):
    # A job may be given as a {index: address} mapping instead of a list.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": {"1": "worker1:2222"}
      },
      "task": {
        "type": "worker",
        "index": 1
      }
    }
    """

    cluster_resolver = TFConfigClusterResolver()
    expected_proto = """
    job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
                     tasks { key: 1 value: 'ps1:2222' } }
    job { name: 'worker' tasks { key: 1 value: 'worker1:2222' } }
    """
    actual_cluster_spec = cluster_resolver.cluster_spec()
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)

  def testAutomaticMasterRead(self):
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """

    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('ps0:2222', cluster_resolver.master())

  def testSpecifiedTaskTypeAndIndexMasterRead(self):
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """

    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('worker1:2222', cluster_resolver.master('worker', 1))

  def testSessionMasterRead(self):
    # An explicit "session_master" entry takes precedence over the task.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "session_master": "sessionmaster:2222",
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """

    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('sessionmaster:2222', cluster_resolver.master())

  def testRpcLayerRead(self):
    # The rpc_layer is prepended to the master address as a URL scheme.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """

    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('grpc://ps0:2222', cluster_resolver.master())

  def testTaskTypeIndexRpcRead(self):
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": "ps",
        "index": 0
      }
    }
    """

    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('ps', cluster_resolver.task_type)
    self.assertEqual(0, cluster_resolver.task_id)
    self.assertEqual('grpc', cluster_resolver.rpc_layer)

  def testParameterOverrides(self):
    # Constructor arguments and attribute assignments override TF_CONFIG.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": "ps",
        "index": 1
      }
    }
    """

    cluster_resolver = TFConfigClusterResolver(task_type='ps', task_id=0)

    self.assertEqual('grpc://ps0:2222', cluster_resolver.master())
    self.assertEqual('ps', cluster_resolver.task_type)
    self.assertEqual(0, cluster_resolver.task_id)

    cluster_resolver.task_type = 'worker'
    cluster_resolver.task_id = 1
    cluster_resolver.rpc_layer = 'test'

    self.assertEqual('test://worker1:2222', cluster_resolver.master())
    self.assertEqual('worker', cluster_resolver.task_type)
    self.assertEqual(1, cluster_resolver.task_id)
    self.assertEqual('test', cluster_resolver.rpc_layer)

  def testTaskTypeCastToString(self):
    # A numeric task type in TF_CONFIG must be reported as a string.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "123456": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": 123456,
        "index": 0
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('123456', cluster_resolver.task_type)

  def testTaskIndexCastToInteger(self):
    # A string task index in TF_CONFIG must be reported as an integer.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "ps": ["ps0:2222", "ps1:2222"],
        "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": "ps",
        "index": "1"
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual(1, cluster_resolver.task_id)

  def testTaskIndexOverride(self):
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "worker": ["worker0:2222", "worker1:2222"]
      },
      "task": {
        "type": "worker",
        "index": "0"
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver(task_id=1)
    self.assertEqual(1, cluster_resolver.task_id)

  def testZeroItemsInClusterSpecMasterRead(self):
    # An empty TF_CONFIG yields an empty master address.
    os.environ['TF_CONFIG'] = """
    {}
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('', cluster_resolver.master())

  def testOneItemInClusterSpecMasterRead(self):
    # A single-worker cluster with no task section also yields ''.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "worker": ["worker0:2222"]
      }
    }
    """
    cluster_resolver = TFConfigClusterResolver()
    self.assertEqual('', cluster_resolver.master())

  @mock.patch.object(framework.config, 'list_logical_devices')
  @mock.patch.object(session.BaseSession, 'list_devices')
  def testNumAcceleratorsFilterTasksByEnvVar(self, mock_list_devices,
                                             mock_eager_list_devices):
    # Device listing is mocked for both eager and graph (session) paths.
    os.environ['TF_CONFIG'] = """
    {
      "cluster": {
        "worker1": ["w10:2222"],
        "worker2": ["w21:2222", "w22:2222", "w23:2222", "w24:2222"]
      },
      "rpc_layer": "grpc",
      "task": {
        "type": "worker1",
        "index": "0"
      }
    }
    """

    devices = [
        LogicalDevice('/job:worker1/task:0/device:TPU:0', 'TPU'),
        LogicalDevice('/job:worker1/task:0/device:TPU:1', 'TPU'),
        LogicalDevice('/job:worker1/task:0/device:GPU:0', 'GPU'),
        LogicalDevice('/job:worker1/task:0/device:GPU:1', 'GPU'),
        LogicalDevice('/job:worker2/task:1/device:TPU:2', 'TPU'),
        LogicalDevice('/job:worker2/task:2/device:TPU:3', 'TPU'),
        LogicalDevice('/job:worker2/task:3/device:GPU:2', 'GPU'),
        LogicalDevice('/job:worker2/task:4/device:GPU:3', 'GPU'),
    ]
    device_list = [
        session._DeviceAttributes(d.name, d.device_type, 1024, 0)
        for d in devices
    ]
    mock_eager_list_devices.return_value = devices
    mock_list_devices.return_value = device_list

    resolver = TFConfigClusterResolver()

    # By default we read from TF_CONFIG
    self.assertEqual(resolver.num_accelerators(), {'TPU': 2, 'GPU': 2})

    # Override still works when we want it to
    self.assertEqual(resolver.num_accelerators(task_type='worker2', task_id=3),
                     {'GPU': 1})
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner.
  test.main()
| 29.710843 | 107 | 0.606042 |
8bc3a03b1dc087b3766178f94070ad5f72ffeaa7 | 2,942 | py | Python | scripts/run_stanza.py | de9uch1/dbsa | ed05c2c03c487a418e32170a855758fcffb4b81e | [
"MIT"
] | 9 | 2020-11-16T11:39:59.000Z | 2022-01-02T16:53:17.000Z | scripts/run_stanza.py | de9uch1/dbsa | ed05c2c03c487a418e32170a855758fcffb4b81e | [
"MIT"
] | null | null | null | scripts/run_stanza.py | de9uch1/dbsa | ed05c2c03c487a418e32170a855758fcffb4b81e | [
"MIT"
] | 2 | 2021-01-05T13:39:15.000Z | 2021-09-12T02:26:27.000Z | #!/usr/bin/env python3
# Copyright (c) Hiroyuki Deguchi
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import fileinput
import os
import sys
from argparse import ArgumentParser
import stanza
import torch
# Languages for which a multi-word token (MWT) expansion model is available;
# get_stanza_kwargs inserts the "mwt" processor into the pipeline for these.
MWT_MODELS = {
    "ar",
    "de",
    "en",
    "es",
    "fr",
    "ca",
    "cop",
    "cs",
    "el",
    "fa",
    "fi",
    "gl",
    "he",
    "hy",
    "it",
    "kk",
    "mr",
    "pl",
    "pt",
    "ta",
    "tr",
    "uk",
    "wo",
}
def parse_args():
    """Build the command-line interface for this script and evaluate it.

    Defaults: read from stdin ("-"), English, models under
    ``$HOME/stanza_resources``, batches of 1000 lines, GPU enabled.
    """
    parser = ArgumentParser()
    parser.add_argument("--input", default="-", type=str, metavar="FILE")
    parser.add_argument("--lang", "-l", default="en")
    parser.add_argument(
        "--model-dir",
        default=os.path.join(os.environ["HOME"], "stanza_resources"),
        type=str,
        metavar="DIR",
    )
    parser.add_argument("--cpu", action="store_true")
    parser.add_argument("--batch-size", default=1000, type=int)
    parser.add_argument("--depparse", action="store_true")
    parser.add_argument("--tokenize", action="store_true")
    return parser.parse_args()
def get_stanza_kwargs(args):
    """Translate parsed CLI options into stanza.Pipeline keyword arguments."""
    opts = {}
    opts["lang"] = args.lang
    opts["dir"] = args.model_dir
    # GPU only when not forced to CPU and CUDA is actually available.
    opts["use_gpu"] = not args.cpu and torch.cuda.is_available()
    if args.depparse:
        # The "mwt" processor is only valid for languages that ship a
        # multi-word-token model.
        if args.lang in MWT_MODELS:
            stages = "tokenize,mwt,pos,lemma,depparse"
        else:
            stages = "tokenize,pos,lemma,depparse"
        opts["processors"] = stages
        # Input is treated as pre-tokenized unless --tokenize was given.
        opts["tokenize_pretokenized"] = not args.tokenize
    elif args.tokenize:
        opts["processors"] = "tokenize"
        opts["tokenize_no_ssplit"] = True
    return opts
def main(args):
    """Stream lines from ``--input``, run them through a Stanza pipeline in
    batches of ``--batch-size``, and print one line per sentence: dependency
    head indices with ``--depparse``, or tokens with ``--tokenize``.
    Progress is reported on stderr.
    """
    print(args, file=sys.stderr, flush=True)
    stanza.download(args.lang, model_dir=args.model_dir)
    kwargs = get_stanza_kwargs(args)
    pipeline = stanza.Pipeline(**kwargs)

    def run_stanza(lines):
        # Process one batch and print per-sentence results to stdout.
        if args.depparse:
            # Pre-tokenized input: one sentence per line, whitespace tokens.
            doc = pipeline([l.split() for l in lines])
            for sent in doc.sentences:
                print(" ".join(str(word.head) for word in sent.words))
        elif args.tokenize:
            doc = pipeline(lines)
            for sent in doc.sentences:
                print(" ".join(str(word.text) for word in sent.words))

    with fileinput.input(files=[args.input]) as f:
        print("| ", end="", file=sys.stderr, flush=True)
        batch = []
        # BUG FIX: initialize the line counter so that empty input no longer
        # raises NameError at the final progress report below.
        i = 0
        for i, line in enumerate(f, start=1):
            batch.append(line.strip())
            if i % args.batch_size == 0:
                run_stanza(batch)
                print("{}...".format(i), end="", file=sys.stderr, flush=True)
                batch = []
        if len(batch) > 0:
            # Flush the final partial batch.
            print(i, file=sys.stderr)
            run_stanza(batch)
        print("| processed sentences: {}".format(i), file=sys.stderr)
if __name__ == "__main__":
    # Parse CLI options and run the pipeline.
    main(parse_args())
| 25.362069 | 77 | 0.575799 |
51ee5f8c91b49681f39e0c452545a4263a25fa54 | 2,983 | py | Python | scripts/generate_json.py | beefoo/hollywood-diversity | e331ffe94d5dda8f21de240caf68f5e7e673d9e0 | [
"MIT"
] | 1 | 2016-07-06T12:57:54.000Z | 2016-07-06T12:57:54.000Z | scripts/generate_json.py | beefoo/hollywood-diversity | e331ffe94d5dda8f21de240caf68f5e7e673d9e0 | [
"MIT"
] | null | null | null | scripts/generate_json.py | beefoo/hollywood-diversity | e331ffe94d5dda8f21de240caf68f5e7e673d9e0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Example usage:
# python generate_json.py ../data/box_office_top_50_movies_1995-2014.csv ../data/people_box_office_top_50_movies_1995-2014_imdb_subset.csv
# python generate_json.py ../data/box_office_top_10_movies_2006-2010.csv ../data/people_box_office_top_10_movies_2006-2010_imdb_subset.csv
import csv
import json
import sys
if len(sys.argv) < 2:
print "Usage: %s <inputfile movie csv> <inputfile people-roles csv>" % sys.argv[0]
sys.exit(1)
# Input CSV paths come straight from the command line.
MOVIE_FILE = sys.argv[1]
PEOPLE_FILE = sys.argv[2]
# Directory where the generated JSON files are written.
OUTPUT_DIR = '../data/'
# Set to False for a dry run that only reports counts.
WRITE_FILES = True
# Columns that must be present in the respective CSV headers.
movie_required_headers = ['movie_id', 'name', 'imdb_id']
people_required_headers = ['name', 'imdb_id', 'img']
roles_required_headers = ['movie_imdb_id', 'movie_name', 'role', 'imdb_id']
# Accumulators filled while reading the CSVs below.
movies = []
people = []
roles = []
# Read movies from csv
with open(MOVIE_FILE, 'rb') as f:
rows = csv.reader(f, delimiter=',')
headers = next(rows, None) # remove header
# add new headers if not exist
for h in movie_required_headers:
if h not in headers:
print "The following movie fields are required: " + movie_required_headers.join(", ")
sys.exit(1)
# populate movies list
for row in rows:
movie = {}
for h in movie_required_headers:
i = headers.index(h)
movie[h] = row[i]
movies.append(movie)
# Read movies from csv
with open(PEOPLE_FILE, 'rb') as f:
rows = csv.reader(f, delimiter=',')
headers = next(rows, None) # remove header
# add new headers if not exist
required_headers = people_required_headers + roles_required_headers
for h in required_headers:
if h not in headers:
print "The following role/people fields are required: " + ", ".join(required_headers)
sys.exit(1)
# populate people/roles lists
for row in rows:
# add person
person = {}
for h in people_required_headers:
i = headers.index(h)
person[h] = row[i]
people.append(person)
# add role
role = {}
for h in roles_required_headers:
i = headers.index(h)
role[h] = row[i]
roles.append(role)
# Deduplicate people by IMDB id (for duplicate ids, the later row wins).
unique_people = {}
for p in people:
    unique_people[p['imdb_id']] = p
people = unique_people.values()
# Report counts
for label, records in (('movies', movies), ('people', people), ('roles', roles)):
    print('Found ' + str(len(records)) + ' ' + label + '.')
# Write JSON files
if WRITE_FILES:
    for name, data in (('movies', movies), ('people', people), ('roles', roles)):
        path = OUTPUT_DIR + name + '.json'
        with open(path, 'w') as outfile:
            json.dump(data, outfile)
            print('Successfully wrote ' + name + ' to file: ' + path)
0821b3f41f014d075090e1a70b22ed94787e79bf | 15,263 | py | Python | calibre/__init__.py | StevenBaby/tools | dae4a3d70582b0342adb2344f4a4edaed917c276 | [
"MIT"
] | 15 | 2021-07-06T13:03:09.000Z | 2022-03-05T04:18:13.000Z | calibre/__init__.py | StevenBaby/tools | dae4a3d70582b0342adb2344f4a4edaed917c276 | [
"MIT"
] | 1 | 2021-12-03T05:39:24.000Z | 2021-12-03T05:39:24.000Z | calibre/__init__.py | StevenBaby/tools | dae4a3d70582b0342adb2344f4a4edaed917c276 | [
"MIT"
] | 5 | 2021-07-30T09:31:31.000Z | 2022-01-03T06:30:25.000Z | # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = "GPL v3"
__copyright__ = "2011, Kovid Goyal <kovid@kovidgoyal.net>; 2011, Li Fanxi <lifanxi@freemindworld.com>"
__docformat__ = "restructuredtext en"
import time
# The Queue module was renamed to queue in Python 3; support both.
try:
    from queue import Empty, Queue
except ImportError:
    from Queue import Empty, Queue
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.sources.base import Option, Source
from calibre.ebooks.metadata.book.base import Metadata
from calibre import as_unicode
from bs4 import BeautifulSoup
import time
import random
import urllib.request
import sys
import re
# XML namespace prefixes for douban/atom feeds.
# NOTE(review): not referenced anywhere in this file's visible code —
# presumably legacy from the old douban API client; confirm before removing.
NAMESPACES = {
    "openSearch": "http://a9.com/-/spec/opensearchrss/1.0/",
    "atom": "http://www.w3.org/2005/Atom",
    "db": "https://www.douban.com/xmlns/",
    "gd": "http://schemas.google.com/g/2005",
}
class Douban(Source):
    """Calibre metadata source plugin that scrapes book metadata and
    cover URLs from douban.com web pages (mainly useful for
    Chinese-language books)."""

    name = "Douban Book"
    author = "Li Fanxi, xcffl, jnozsc, else"
    version = (4, 0, 1)
    minimum_calibre_version = (5, 0, 0)
    description = (
        "Downloads metadata and covers from Douban.com. "
        "Useful only for Chinese language books."
    )

    capabilities = frozenset(["identify", "cover"])
    touched_fields = frozenset(
        [
            "title",
            "authors",
            "tags",
            "pubdate",
            "comments",
            "publisher",
            "identifier:isbn",
            "rating",
            "identifier:douban",
        ]
    )  # language currently disabled
    supports_gzip_transfer_encoding = True
    cached_cover_url_is_reliable = True

    ISBN_URL = "http://douban.com/isbn/"
    SUBJECT_URL = "http://book.douban.com/subject/"
    DOUBAN_BOOK_URL = 'https://book.douban.com/subject/%s/'

    options = (
        Option(
            "include_subtitle_in_title",
            "bool",
            True,
            ("Include subtitle in book title:"),
            ("Whether to append subtitle in the book title."),
        ),
    )

    def identify(self, log, result_queue, abort, title=None, authors=None, identifiers={}, timeout=30,):
        """Look the book up on douban (by ISBN or douban id) and put a
        Metadata object onto result_queue. Called by calibre."""
        # Random delay to reduce the chance of douban throttling us.
        time.sleep(random.randint(1, 3))
        log.info("start get metadata from douban...")
        log.info(str(identifiers))

        book = self.get_book(log, identifiers)
        # Bug fix: get_book() returns -1 when the page cannot be fetched.
        # Previously the int was passed on to get_all_details(), which
        # crashed on book["id"] inside its exception handler.
        if book == -1:
            log.info("no douban page found for: " + str(identifiers))
            return None

        # There is no point running these queries in threads as douban
        # throttles requests returning 403 Forbidden errors
        self.get_all_details(log, book, abort, result_queue, timeout)
        return None

    def to_metadata(self, log, entry_, timeout):  # {{{
        """Convert a scraped book dict (from get_book) into a calibre
        Metadata object; returns None when id or title is missing."""
        from calibre.utils.date import parse_date, utcnow

        log.info("to_metadata")
        douban_id = entry_.get("id")
        title = entry_.get("title")
        description = entry_.get("summary")
        # subtitle = entry_.get('subtitle')  # TODO: std metadata doesn't have this field
        publisher = entry_.get("publisher")
        isbn = entry_.get("isbn13")  # ISBN11 is obsolete, use ISBN13
        pubdate = entry_.get("pubdate")
        authors = entry_.get("author")
        book_tags = entry_.get("tags")
        rating = entry_.get("rating")
        cover_url = entry_.get("cover")
        series = entry_.get("series")

        if not authors:
            authors = [("Unknown")]
        if not douban_id or not title:
            # Silently discard this entry
            return None

        mi = Metadata(title, authors)
        mi.identifiers = {"douban": douban_id}
        mi.publisher = publisher
        mi.comments = description

        # ISBN: collect every valid one; the longest becomes the primary.
        isbns = []
        if isinstance(isbn, (type(""), bytes)):
            if check_isbn(isbn):
                isbns.append(isbn)
        else:
            for x in isbn:
                if check_isbn(x):
                    isbns.append(x)
        if isbns:
            mi.isbn = sorted(isbns, key=len)[-1]
        mi.all_isbns = isbns

        # Tags
        mi.tags = book_tags

        # pubdate
        if pubdate:
            try:
                default = utcnow().replace(day=15)
                mi.pubdate = parse_date(pubdate, assume_utc=True, default=default)
            except BaseException:
                log.error("Failed to parse pubdate %r" % pubdate)

        # Rating: douban uses a 0-10 scale, calibre a 0-5 scale.
        if rating:
            try:
                mi.rating = rating / 2.0
            except BaseException:
                log.exception("Failed to parse rating")
                mi.rating = 0

        # Cover
        mi.has_douban_cover = None
        u = cover_url
        if u:
            # If URL contains "book-default", the book doesn't have a cover
            if u.find("book-default") == -1:
                mi.has_douban_cover = u

        # Series
        if series:
            mi.series = series

        return mi
    # }}}

    def get_isbn_url(self, isbn):  # {{{
        """Return the douban lookup URL for an ISBN ('' if isbn is None)."""
        if isbn is not None:
            return self.ISBN_URL + isbn
        else:
            return ""
    # }}}

    def get_douban_url(self, identifiers):
        """Prefer an ISBN-based URL; fall back to the douban-id URL."""
        isbn = self.get_book_isbn(identifiers)
        url = self.get_isbn_url(isbn)
        if url:
            return url
        tup = self.get_book_url(identifiers)
        if tup:
            return tup[2]

    def get_book_url(self, identifiers):  # {{{
        """Standard calibre hook: (id_type, id_value, url) for a douban id."""
        db = identifiers.get('douban', None)
        if db is not None:
            return ('douban', db, self.DOUBAN_BOOK_URL % db)
    # }}}

    def get_book_isbn(self, identifiers):
        """Return a validated ISBN from identifiers, or None."""
        isbn = check_isbn(identifiers.get("isbn", None))
        return isbn

    def download_cover(self, log, result_queue, abort, title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False,):
        """Download the cover image, running identify() first when the
        cover URL is not already cached. Called by calibre."""
        cached_url = self.get_cached_cover_url(identifiers)
        if cached_url is None:
            log.info("No cached cover found, running identify")
            rq = Queue()
            self.identify(
                log, rq, abort, title=title, authors=authors, identifiers=identifiers
            )
            if abort.is_set():
                return
            results = []
            while True:
                try:
                    results.append(rq.get_nowait())
                except Empty:
                    break
            results.sort(
                key=self.identify_results_keygen(
                    title=title, authors=authors, identifiers=identifiers
                )
            )
            for mi in results:
                cached_url = self.get_cached_cover_url(mi.identifiers)
                if cached_url is not None:
                    break
        if cached_url is None:
            log.info("No cover found")
            return

        if abort.is_set():
            return
        br = self.browser
        log("Downloading cover from:", cached_url)
        try:
            cdata = br.open_novisit(cached_url, timeout=timeout).read()
            if cdata:
                result_queue.put((self, cdata))
        except BaseException:
            log.exception("Failed to download cover from:", cached_url)
    # }}}

    def get_cached_cover_url(self, identifiers):  # {{{
        """Map identifiers -> cached cover URL (via the douban id)."""
        url = None
        db = identifiers.get("douban", None)
        if db is None:
            isbn = identifiers.get("isbn", None)
            if isbn is not None:
                db = self.cached_isbn_to_identifier(isbn)
        if db is not None:
            url = self.cached_identifier_to_cover_url(db)
        return url
    # }}}

    def get_all_details(self, log, book, abort, result_queue, timeout):  # {{{
        """Convert the scraped book dict to Metadata, fill the isbn/cover
        caches and push the result onto result_queue."""
        try:
            log.info("get_all_details")
            ans = self.to_metadata(log, book, timeout)
            if isinstance(ans, Metadata):
                ans.source_relevance = 0
                douban_id = ans.identifiers["douban"]
                isbn = book.get("isbn13")
                self.cache_isbn_to_identifier(isbn, douban_id)
                if ans.has_douban_cover:
                    self.cache_identifier_to_cover_url(douban_id, ans.has_douban_cover)
                self.clean_downloaded_metadata(ans)
                result_queue.put(ans)
        except BaseException:
            log.exception("Failed to get metadata for identify entry:", book["id"])
        if abort.is_set():
            return
    # }}}

    def get_book(self, log, identifiers={}):
        """Fetch and scrape the douban page for the given identifiers.
        Returns a dict of raw fields, or -1 when the page can't be
        fetched."""
        log.info("start get book......")
        url = self.get_douban_url(identifiers)
        html = self.__get_html(url)
        if html == -1:
            return -1
        soup = self.__get_soup(html=html)
        infos = self.__get_infos(soup=soup)
        isbn = self.__get_isbn(log, identifiers, soup, infos)
        book = {"isbn13": isbn}
        book["author"] = self.__get_authors(infos)
        book["publisher"] = self.__get_info(infos, "出版社:")
        book["pubdate"] = self.__get_info(infos, "出版年:")
        book["series"] = self.__get_info(infos, "丛书:")
        book["id"] = self.__get_id(soup=soup)
        book["tags"] = self.__get_tags(soup=soup)
        book["rating"] = self.__get_score(soup=soup)
        book["title"] = self.__get_title(soup=soup)
        book["summary"] = self.__get_intro(soup=soup)
        book["cover"] = self.__get_cover(soup=soup)
        return book

    def __get_html(self, url):
        """GET the page with a desktop browser User-Agent; -1 on failure."""
        headers_ = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36"
        }
        request = urllib.request.Request(url, headers=headers_)
        try:
            response = urllib.request.urlopen(request)
        except BaseException:
            return -1
        html = response.read().decode("utf-8")
        return html

    def __get_soup(self, html=""):
        """Parse the HTML with BeautifulSoup/lxml."""
        soup = BeautifulSoup(html, "lxml", exclude_encodings="utf-8")
        return soup

    def __get_infos(self, soup):
        """Return the cleaned, non-empty lines of the page's '#info' block."""
        soupSelect = str(soup.select("#info"))
        soupTemp = BeautifulSoup(str(soupSelect), "lxml", exclude_encodings="utf-8")
        infosTemp = soupTemp.text.splitlines()
        infos = []
        for info in infosTemp:
            tmp = info.strip()
            if tmp and tmp != "/":
                infos.append(tmp)
        # Drop the stray brackets produced by str()-ing the select() list.
        infos.remove("[")
        infos.remove("]")
        return infos

    def __get_info(self, infos, name):
        """Return the value following the `name` label, or ''."""
        for token in infos:
            if token.find(name) != -1:
                return token[len(name) + 1:]
        return ""

    def __get_authors(self, infos):
        """Collect author names between the '作者:' and '出版社:' labels,
        normalizing CJK brackets and dot characters."""
        begin = -1
        end = -1
        i = 0
        for token in infos:
            if token == "作者:":
                begin = i
            elif token.find("出版社:") != -1:
                end = i + 1
                break
            else:
                i = i + 1
        authors = []
        if begin == -1:
            return authors
        if end == -1:
            authors.append(infos[begin + 1])
            return authors
        else:
            for i in range(begin + 1, end):
                author = infos[i].strip()
                author = author.replace("【", "[")
                author = author.replace("】", "]")
                author = author.replace("(", "[")
                author = author.replace(")", "]")
                author = author.replace("〔", "[")
                author = author.replace("〕", "]")
                author = author.replace("(", "[")
                author = author.replace(")", "]")
                author = author.replace("]", "] ")
                author = author.replace("•", "·")
                author = author.replace("・", "·")
                authors.append(author)
            return authors

    def __get_id(self, soup):
        """Extract the numeric douban subject id from the meta tags,
        or 0 when not found."""
        idSelects = str(soup.select("meta")).split()
        for item in idSelects:
            idIndex = item.find("douban.com/book/subject/")
            if idIndex != -1:
                id = item[idIndex + 24: -2]
                return id
        return 0

    def __get_tags(self, soup):
        """Return the list of user tags from the '#db-tags-section' block."""
        tagSelect = str(soup.select("#db-tags-section > div"))
        tagTemp = BeautifulSoup(str(tagSelect), "lxml", exclude_encodings="utf-8")
        tagText = tagTemp.text
        tags = tagText.split()
        tags.remove("[")
        tags.remove("]")
        return tags

    def __get_cover(self, soup):
        """Extract the cover image URL from the '#mainpic' img tag."""
        coverSelect = str(soup.select("#mainpic > a > img"))
        tempCover = str(
            BeautifulSoup(str(coverSelect), "lxml", exclude_encodings="utf-8")
        )
        index1 = tempCover.find("src=")
        tempCover = tempCover[index1 + 5:]
        index2 = tempCover.find('"')
        tempCover = tempCover[:index2]
        return tempCover

    def __get_score(self, soup):
        """Return the average rating as a float (0.0 when missing)."""
        soupSelect = str(
            soup.select("#interest_sectl > div > div.rating_self.clearfix > strong")
        )
        soupTemp = BeautifulSoup(str(soupSelect), "lxml", exclude_encodings="utf-8")
        score = soupTemp.text.strip("[] \n\t")
        if score:
            s = float(score)
            return s
        else:
            return 0.0

    def __get_title(self, soup):
        """Return the page title text."""
        soupSelect = str(soup.select("body>div>h1>span"))
        soupTemp = BeautifulSoup(str(soupSelect), "lxml", exclude_encodings="utf-8")
        return str(soupTemp.text).strip("[] \n\t")

    def __get_intro(self, soup):
        """Return the book introduction, dropping the collapsed preview
        before the '(展开全部)' marker when present."""
        soupSelect = soup.select("#link-report")
        soupTemp = BeautifulSoup(str(soupSelect), "lxml", exclude_encodings="utf-8")
        intro = str(soupTemp.text).strip("[] \n\t")
        find = intro.find("(展开全部)")
        if find != -1:
            intro = intro[find + 6:]
        return intro.strip("[] \n\t")

    def __get_isbn(self, log, identifiers, soup, infos):
        """Return the ISBN from identifiers if present, else scrape it
        from the info lines ('ISBN: <digits>')."""
        isbn = identifiers.get("isbn", None)
        if isbn:
            return isbn
        pattern = re.compile(r"ISBN: (\d+)", re.IGNORECASE)
        isbn = ''
        for info in infos:
            match = pattern.match(info)
            if match:
                isbn = match.group(1)
                break
        return isbn
# Plugin self-tests; requires network access to douban.com.
if __name__ == "__main__": # tests {{{
    # To run these test use: calibre-debug -e src/calibre/ebooks/metadata/sources/douban.py
    from calibre.ebooks.metadata.sources.test import (
        test_identify_plugin,
        title_test,
        authors_test,
    )
    test_identify_plugin(
        Douban.name,
        [
            (
                {
                    "identifiers": {"isbn": "9787536692930", "douban": 2567698},
                    "title": "三体",
                    "authors": ["刘慈欣"],
                },
                [title_test("三体", exact=True), authors_test(["刘慈欣"])],
            ),
        ],
    )
# }}}
| 33.252723 | 142 | 0.525126 |
5ee7775dd54647b38f67401473ec348a14010877 | 1,632 | py | Python | openselfsup/models/memories/simple_memory.py | bin20192019/OpenSelfSup | 6891da7612b9ddfbd06beb7ffad3592513d190bb | [
"Apache-2.0"
] | 2 | 2020-07-01T02:46:25.000Z | 2021-02-21T03:56:10.000Z | openselfsup/models/memories/simple_memory.py | Yipeng-Sun/OpenSelfSup | 55af4e9ba1934e1e1c18f208481db97ed507db76 | [
"Apache-2.0"
] | null | null | null | openselfsup/models/memories/simple_memory.py | Yipeng-Sun/OpenSelfSup | 55af4e9ba1934e1e1c18f208481db97ed507db76 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.distributed as dist
from mmcv.runner import get_dist_info
from openselfsup.utils import AliasMethod
from ..registry import MEMORIES
@MEMORIES.register_module
class SimpleMemory(nn.Module):
    """Non-parametric memory bank of L2-normalized feature vectors.

    Holds ``length`` rows of dimension ``feat_dim`` on GPU and updates
    rows with an exponential moving average controlled by ``momentum``.
    Updates are synchronized across all distributed replicas.
    """

    def __init__(self, length, feat_dim, momentum, **kwargs):
        super(SimpleMemory, self).__init__()
        self.rank, self.num_replicas = get_dist_info()
        # Randomly initialized bank, normalized to unit-length rows.
        self.feature_bank = torch.randn(length, feat_dim).cuda()
        self.feature_bank = nn.functional.normalize(self.feature_bank)
        self.momentum = momentum
        # Alias-method sampler over all bank entries (uniform weights).
        self.multinomial = AliasMethod(torch.ones(length))
        self.multinomial.cuda()

    def update(self, ind, feature):
        """EMA-update the bank rows at ``ind`` with the normalized
        ``feature`` vectors, after gathering both from every rank."""
        feature_norm = nn.functional.normalize(feature)
        ind, feature_norm = self._gather(ind, feature_norm)
        feature_old = self.feature_bank[ind, ...]
        # Blend old and new, then re-normalize so rows stay unit length.
        feature_new = (1 - self.momentum) * feature_old + \
            self.momentum * feature_norm
        feature_new_norm = nn.functional.normalize(feature_new)
        self.feature_bank[ind, ...] = feature_new_norm

    def _gather(self, ind, feature): # gather ind and feature
        """All-gather ``ind`` and ``feature`` from every distributed
        rank and concatenate the results along dim 0."""
        ind_gathered = [
            torch.ones_like(ind).cuda() for _ in range(self.num_replicas)
        ]
        feature_gathered = [
            torch.ones_like(feature).cuda() for _ in range(self.num_replicas)
        ]
        dist.all_gather(ind_gathered, ind)
        dist.all_gather(feature_gathered, feature)
        ind_gathered = torch.cat(ind_gathered, dim=0)
        feature_gathered = torch.cat(feature_gathered, dim=0)
        return ind_gathered, feature_gathered
| 37.953488 | 77 | 0.681985 |
0e1fa5f2fd0fb2bde2f688c02ff822f21dce4da9 | 4,610 | py | Python | sample/solver/_solver_helper.py | marblejenka/sawatabi | 8940bafed202e03b9f9fce4df2229960018871c8 | [
"Apache-2.0"
] | 12 | 2020-10-11T15:41:07.000Z | 2021-11-24T15:23:17.000Z | sample/solver/_solver_helper.py | marblejenka/sawatabi | 8940bafed202e03b9f9fce4df2229960018871c8 | [
"Apache-2.0"
] | 60 | 2020-10-16T17:29:15.000Z | 2021-05-08T11:20:26.000Z | sample/solver/_solver_helper.py | marblejenka/sawatabi | 8940bafed202e03b9f9fce4df2229960018871c8 | [
"Apache-2.0"
] | 7 | 2021-02-12T04:32:00.000Z | 2021-11-24T15:23:22.000Z | # Copyright 2021 Kotaro Terada
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sawatabi
def _create_ising_model():
    """Build a 3x2 Ising model grown row by row via append().

    Optimal solution: x[1][0] and x[1][1] are -1, every other spin is
    +1, with minimum energy -10.0.
    """
    model = sawatabi.model.LogicalModel(mtype="ising")

    # Start with a (1, 2) array of spins.
    x = model.variables("x", shape=(1, 2))
    model.add_interaction(x[0, 0], coefficient=1.0)
    model.add_interaction((x[0, 0], x[0, 1]), coefficient=1.0)

    # Grow by one row and couple it to the previous one.
    x = model.append("x", shape=(1, 0))
    model.add_interaction((x[0, 1], x[1, 0]), coefficient=-2.0)
    model.add_interaction((x[1, 0], x[1, 1]), coefficient=3.0)

    # And one more row.
    x = model.append("x", shape=(1, 0))
    model.add_interaction((x[1, 1], x[2, 0]), coefficient=-4.0)
    model.add_interaction((x[2, 0], x[2, 1]), coefficient=5.0)

    # Shift the energy so the optimum is exactly -10.0.
    model.offset(6.0)

    return model.to_physical()
def _create_qubo_model():
    """Build a QUBO with a 1-hot constraint over four variables.

    Optimal solution: exactly one variable is 1, the rest are 0, with
    minimum energy 0.0.
    """
    model = sawatabi.model.LogicalModel(mtype="qubo")

    # One-hot constraint over an array of shape (4,).
    a = model.variables("a", shape=(4,))
    model.add_constraint(sawatabi.model.constraint.NHotConstraint(variables=a, n=1))

    # Shift the energy so the optimum is exactly 0.0.
    model.offset(1.0)

    return model.to_physical()
def _create_simple_ising_model_with_only_1_body():
    """Twelve independent spins, each with a -1.0 one-body coefficient.

    Optimal solution: all spins -1, minimum energy -12.0.
    """
    model = sawatabi.model.LogicalModel(mtype="ising")
    x = model.variables("x", shape=(12,))
    for idx in range(12):
        model.add_interaction(x[idx], coefficient=-1.0)
    return model.to_physical()
def _create_simple_ising_model_with_only_2_body():
    """Chain of twelve spins with +1.0 couplings between neighbours.

    Optimal solution: all spins aligned (all +1 or all -1), minimum
    energy -11.0.
    """
    model = sawatabi.model.LogicalModel(mtype="ising")
    x = model.variables("x", shape=(12,))
    for left in range(11):
        model.add_interaction((x[left], x[left + 1]), coefficient=1.0)
    return model.to_physical()
def _create_simple_2x2_ising_model_without_active_var():
    """2x2 Ising model where a[0][0] has no interaction attached.

    Optimal solution: every other spin +1, minimum energy 0.0.
    """
    model = sawatabi.model.LogicalModel(mtype="ising")
    a = model.variables("a", shape=(2, 2))
    for row, col in ((0, 1), (1, 0), (1, 1)):
        model.add_interaction(a[row, col], coefficient=2.0)
    model.offset(6.0)
    return model.to_physical()
def _create_simple_2x2_qubo_model_without_active_var():
    """2x2 QUBO model where b[0][0] has no interaction attached.

    Optimal solution: every other variable 1, minimum energy 0.0.
    """
    model = sawatabi.model.LogicalModel(mtype="qubo")
    b = model.variables("b", shape=(2, 2))
    for row, col in ((0, 1), (1, 0), (1, 1)):
        model.add_interaction(b[row, col], coefficient=2.0)
    model.offset(6.0)
    return model.to_physical()
def _print_sampleset(sampleset):
print("\nsampleset")
print(sampleset)
print("\nsampleset.info")
print(sampleset.info)
print("\nsampleset.variables")
print(sampleset.variables)
print("\nsampleset.record")
print(sampleset.record)
print("\nsampleset.record[0]")
print(sampleset.record[0])
print(sampleset.record[0].sample)
print(sampleset.record[0].energy)
print(sampleset.record[0].num_occurrences)
print("\nsampleset.vartype:")
print(sampleset.vartype)
print("\nsampleset.first:")
print(sampleset.first)
print("\nsampleset.samples():")
print(list(sampleset.samples()))
| 30.328947 | 75 | 0.65423 |
669a098586fefd1ff284cbc757a1eb4300820f53 | 2,637 | py | Python | tests/shells/test_bash.py | rubo77/thefuck | 5734412d82583c7bd6a2e91575f2eb54b77ec22d | [
"MIT"
] | null | null | null | tests/shells/test_bash.py | rubo77/thefuck | 5734412d82583c7bd6a2e91575f2eb54b77ec22d | [
"MIT"
] | null | null | null | tests/shells/test_bash.py | rubo77/thefuck | 5734412d82583c7bd6a2e91575f2eb54b77ec22d | [
"MIT"
] | 1 | 2022-03-27T03:49:07.000Z | 2022-03-27T03:49:07.000Z | # -*- coding: utf-8 -*-
import os
import pytest
from thefuck.shells import Bash
@pytest.mark.usefixtures('isfile', 'no_memoize', 'no_cache')
class TestBash(object):
    """Unit tests for the Bash shell adapter (thefuck.shells.Bash)."""

    @pytest.fixture
    def shell(self):
        """A fresh Bash adapter instance for each test."""
        return Bash()

    @pytest.fixture(autouse=True)
    def shell_aliases(self):
        """Populate TF_SHELL_ALIASES as the real shell wrapper would."""
        os.environ['TF_SHELL_ALIASES'] = (
            'alias fuck=\'eval $(thefuck $(fc -ln -1))\'\n'
            'alias l=\'ls -CF\'\n'
            'alias la=\'ls -A\'\n'
            'alias ll=\'ls -alF\'')

    @pytest.mark.parametrize('before, after', [
        ('pwd', 'pwd'),
        ('fuck', 'eval $(thefuck $(fc -ln -1))'),
        ('awk', 'awk'),
        ('ll', 'ls -alF')])
    def test_from_shell(self, before, after, shell):
        """Aliases are expanded; non-alias commands pass through."""
        assert shell.from_shell(before) == after

    def test_to_shell(self, shell):
        """to_shell is the identity for bash."""
        assert shell.to_shell('pwd') == 'pwd'

    def test_and_(self, shell):
        """Commands are chained with &&."""
        assert shell.and_('ls', 'cd') == 'ls && cd'

    def test_get_aliases(self, shell):
        """Aliases are parsed out of TF_SHELL_ALIASES."""
        assert shell.get_aliases() == {'fuck': 'eval $(thefuck $(fc -ln -1))',
                                       'l': 'ls -CF',
                                       'la': 'ls -A',
                                       'll': 'ls -alF'}

    def test_app_alias(self, shell):
        """The generated alias wires up thefuck with the chosen name."""
        assert 'alias fuck' in shell.app_alias('fuck')
        assert 'alias FUCK' in shell.app_alias('FUCK')
        assert 'thefuck' in shell.app_alias('fuck')
        assert 'TF_ALIAS=fuck' in shell.app_alias('fuck')
        assert 'PYTHONIOENCODING=utf-8' in shell.app_alias('fuck')

    def test_app_alias_variables_correctly_set(self, shell):
        """Environment variables appear in the expected order/places."""
        alias = shell.app_alias('fuck')
        assert "alias fuck='TF_CMD=$(TF_ALIAS" in alias
        assert '$(TF_ALIAS=fuck PYTHONIOENCODING' in alias
        assert 'PYTHONIOENCODING=utf-8 TF_SHELL_ALIASES' in alias
        assert 'ALIASES=$(alias) thefuck' in alias

    def test_get_history(self, history_lines, shell):
        """History lines come back in order."""
        history_lines(['ls', 'rm'])
        assert list(shell.get_history()) == ['ls', 'rm']

    def test_split_command(self, shell):
        """Commands are split on whitespace."""
        command = 'git log -p'
        command_parts = ['git', 'log', '-p']
        assert shell.split_command(command) == command_parts

    def test_how_to_configure(self, shell, config_exists):
        """Automatic configuration is offered when the rc file exists."""
        config_exists.return_value = True
        assert shell.how_to_configure().can_configure_automatically

    def test_how_to_configure_when_config_not_found(self, shell,
                                                    config_exists):
        """No automatic configuration without an rc file."""
        config_exists.return_value = False
        assert not shell.how_to_configure().can_configure_automatically
| 36.123288 | 78 | 0.580584 |
163d4c9dc7aae4e936b4265f6fc3909388aaa29a | 1,260 | py | Python | coverage/version.py | sbruno/coveragepy | a2b26e6753e3dc4d3c1c1889f00fd1840d945c3e | [
"Apache-2.0"
] | null | null | null | coverage/version.py | sbruno/coveragepy | a2b26e6753e3dc4d3c1c1889f00fd1840d945c3e | [
"Apache-2.0"
] | null | null | null | coverage/version.py | sbruno/coveragepy | a2b26e6753e3dc4d3c1c1889f00fd1840d945c3e | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""The version and URL for coverage.py"""
# This file is exec'ed in setup.py, don't import anything!
# Same semantics as sys.version_info.
# (major, minor, micro, releaselevel, serial)
version_info = (5, 0, 3, 'alpha', 0)
def _make_version(major, minor, micro, releaselevel, serial):
"""Create a readable version string from version_info tuple components."""
assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
version = "%d.%d" % (major, minor)
if micro:
version += ".%d" % (micro,)
if releaselevel != 'final':
short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
version += "%s%d" % (short, serial)
return version
def _make_url(major, minor, micro, releaselevel, serial):
    """Make the URL people should start at for this version of coverage.py."""
    base = "https://coverage.readthedocs.io"
    if releaselevel == 'final':
        return base
    # Pre-releases point at a version-specific documentation build.
    return base + "/en/coverage-" + _make_version(major, minor, micro, releaselevel, serial)
# Public package metadata derived from version_info above.
__version__ = _make_version(*version_info)
__url__ = _make_url(*version_info)
| 37.058824 | 89 | 0.66746 |
be916e10a5633280e944ab149a8a2b3cc1ecc62c | 44,159 | py | Python | aranyapythonpb/peripheral_pb2.py | arhat-dev/aranya-proto | edaa0fc66ec4e7a09da37bdf9d9daa77c77a3195 | [
"Apache-2.0"
] | null | null | null | aranyapythonpb/peripheral_pb2.py | arhat-dev/aranya-proto | edaa0fc66ec4e7a09da37bdf9d9daa77c77a3195 | [
"Apache-2.0"
] | 5 | 2020-10-10T15:37:52.000Z | 2020-12-18T21:03:25.000Z | aranyapythonpb/peripheral_pb2.py | arhat-dev/aranya-proto | edaa0fc66ec4e7a09da37bdf9d9daa77c77a3195 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: peripheral.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='peripheral.proto',
package='aranya',
syntax='proto3',
serialized_options=b'Z!arhat.dev/aranya-proto/aranyagopb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x10peripheral.proto\x12\x06\x61ranya\"\xc0\x01\n\tTLSConfig\x12\x13\n\x0bserver_name\x18\x01 \x01(\t\x12\x1c\n\x14insecure_skip_verify\x18\x02 \x01(\x08\x12\x13\n\x0bmin_version\x18\x03 \x01(\r\x12\x13\n\x0bmax_version\x18\x04 \x01(\r\x12\x0f\n\x07\x63\x61_cert\x18\x05 \x01(\x0c\x12\x0c\n\x04\x63\x65rt\x18\x06 \x01(\x0c\x12\x0b\n\x03key\x18\x07 \x01(\x0c\x12\x15\n\rcipher_suites\x18\x08 \x03(\r\x12\x13\n\x0bnext_protos\x18\t \x03(\t\"\xaf\x01\n\x0c\x43onnectivity\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\x12\x30\n\x06params\x18\x03 \x03(\x0b\x32 .aranya.Connectivity.ParamsEntry\x12\x1e\n\x03tls\x18\x04 \x01(\x0b\x32\x11.aranya.TLSConfig\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x93\x01\n\x13PeripheralOperation\x12\x14\n\x0coperation_id\x18\x01 \x01(\t\x12\x37\n\x06params\x18\x02 \x03(\x0b\x32\'.aranya.PeripheralOperation.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x8f\x05\n\x10PeripheralMetric\x12\x0c\n\x04name\x18\x01 \x01(\t\x12<\n\rreport_method\x18\x02 \x01(\x0e\x32%.aranya.PeripheralMetric.ReportMethod\x12\x36\n\nvalue_type\x18\x03 \x01(\x0e\x32\".aranya.PeripheralMetric.ValueType\x12I\n\x11peripheral_params\x18\x04 \x03(\x0b\x32..aranya.PeripheralMetric.PeripheralParamsEntry\x12\x15\n\rreporter_name\x18\x05 \x01(\t\x12\x45\n\x0freporter_params\x18\x06 \x03(\x0b\x32,.aranya.PeripheralMetric.ReporterParamsEntry\x1a\x37\n\x15PeripheralParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x35\n\x13ReporterParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"s\n\x0cReportMethod\x12\x1c\n\x18REPORT_WITH_NODE_METRICS\x10\x00\x12\"\n\x1eREPORT_WITH_ARHAT_CONNECTIVITY\x10\x01\x12!\n\x1dREPORT_WITH_STANDALONE_CLIENT\x10\x02\"i\n\tValueType\x12\x1e\n\x1aMETRICS_VALUE_TYPE_UNTYPED\x10\x00\x12\x1e\n\x1aMETRICS_VALUE_TYPE_COUNTER\x10\x01\x12\x1c\n\x18METRICS_VALUE_TYPE_GAUGE\x10\x02\"\xce\x01\n\x13PeripheralEnsureCmd\x12$\n\x04kind\x18\x01 \x01(\x0e\x32\x16.aranya.PeripheralType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\'\n\tconnector\x18\x03 \x01(\x0b\x32\x14.aranya.Connectivity\x12/\n\noperations\x18\x04 \x03(\x0b\x32\x1b.aranya.PeripheralOperation\x12)\n\x07metrics\x18\x05 \x03(\x0b\x32\x18.aranya.PeripheralMetric\"-\n\x11PeripheralListCmd\x12\x18\n\x10peripheral_names\x18\x01 \x03(\t\"/\n\x13PeripheralDeleteCmd\x12\x18\n\x10peripheral_names\x18\x01 \x03(\t\"S\n\x14PeripheralOperateCmd\x12\x17\n\x0fperipheral_name\x18\x01 \x01(\t\x12\x14\n\x0coperation_id\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"7\n\x1bPeripheralMetricsCollectCmd\x12\x18\n\x10peripheral_names\x18\x02 \x03(\t\"\x82\x01\n\x13PeripheralStatusMsg\x12$\n\x04kind\x18\x01 \x01(\x0e\x32\x16.aranya.PeripheralType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12&\n\x05state\x18\x03 \x01(\x0e\x32\x17.aranya.PeripheralState\x12\x0f\n\x07message\x18\x04 \x01(\t\"K\n\x17PeripheralStatusListMsg\x12\x30\n\x0bperipherals\x18\x01 \x03(\x0b\x32\x1b.aranya.PeripheralStatusMsg\",\n\x1cPeripheralOperationResultMsg\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x0c*p\n\x0ePeripheralType\x12\x1c\n\x18_INVALID_PERIPHERAL_TYPE\x10\x00\x12\x1a\n\x16PERIPHERAL_TYPE_NORMAL\x10\x01\x12$\n PERIPHERAL_TYPE_METRICS_REPORTER\x10\x02*\xa9\x01\n\x0fPeripheralState\x12\x1c\n\x18PERIPHERAL_STATE_UNKNOWN\x10\x00\x12\x1c\n\x18PERIPHERAL_STATE_CREATED\x10\x01\x12\x1e\n\x1aPERIPHERAL_STATE_CONNECTED\x10\x02\x12\x1c\n\x18PERIPHERAL_STATE_ERRORED\x10\x03\x12\x1c\n\x18PERIPHERAL_STATE_REMOVED\x10\x04\x42#Z!arhat.dev/aranya-proto/aranyagopbb\x06proto3'
)
_PERIPHERALTYPE = _descriptor.EnumDescriptor(
name='PeripheralType',
full_name='aranya.PeripheralType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='_INVALID_PERIPHERAL_TYPE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PERIPHERAL_TYPE_NORMAL', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PERIPHERAL_TYPE_METRICS_REPORTER', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1912,
serialized_end=2024,
)
_sym_db.RegisterEnumDescriptor(_PERIPHERALTYPE)
PeripheralType = enum_type_wrapper.EnumTypeWrapper(_PERIPHERALTYPE)
_PERIPHERALSTATE = _descriptor.EnumDescriptor(
name='PeripheralState',
full_name='aranya.PeripheralState',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='PERIPHERAL_STATE_UNKNOWN', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PERIPHERAL_STATE_CREATED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PERIPHERAL_STATE_CONNECTED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PERIPHERAL_STATE_ERRORED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PERIPHERAL_STATE_REMOVED', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2027,
serialized_end=2196,
)
_sym_db.RegisterEnumDescriptor(_PERIPHERALSTATE)
PeripheralState = enum_type_wrapper.EnumTypeWrapper(_PERIPHERALSTATE)
# Module-level aliases for the enum values declared above, mirroring proto3
# scoping (enum values are siblings of the enum itself).
# NOTE(review): this file is protoc-generated; comments here are lost on
# regeneration — prefer documenting in peripheral.proto.
_INVALID_PERIPHERAL_TYPE = 0
PERIPHERAL_TYPE_NORMAL = 1
PERIPHERAL_TYPE_METRICS_REPORTER = 2
PERIPHERAL_STATE_UNKNOWN = 0
PERIPHERAL_STATE_CREATED = 1
PERIPHERAL_STATE_CONNECTED = 2
PERIPHERAL_STATE_ERRORED = 3
PERIPHERAL_STATE_REMOVED = 4
_PERIPHERALMETRIC_REPORTMETHOD = _descriptor.EnumDescriptor(
name='ReportMethod',
full_name='aranya.PeripheralMetric.ReportMethod',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='REPORT_WITH_NODE_METRICS', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REPORT_WITH_ARHAT_CONNECTIVITY', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REPORT_WITH_STANDALONE_CLIENT', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=985,
serialized_end=1100,
)
_sym_db.RegisterEnumDescriptor(_PERIPHERALMETRIC_REPORTMETHOD)
_PERIPHERALMETRIC_VALUETYPE = _descriptor.EnumDescriptor(
name='ValueType',
full_name='aranya.PeripheralMetric.ValueType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='METRICS_VALUE_TYPE_UNTYPED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='METRICS_VALUE_TYPE_COUNTER', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='METRICS_VALUE_TYPE_GAUGE', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1102,
serialized_end=1207,
)
_sym_db.RegisterEnumDescriptor(_PERIPHERALMETRIC_VALUETYPE)
_TLSCONFIG = _descriptor.Descriptor(
name='TLSConfig',
full_name='aranya.TLSConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='server_name', full_name='aranya.TLSConfig.server_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='insecure_skip_verify', full_name='aranya.TLSConfig.insecure_skip_verify', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='min_version', full_name='aranya.TLSConfig.min_version', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_version', full_name='aranya.TLSConfig.max_version', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ca_cert', full_name='aranya.TLSConfig.ca_cert', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cert', full_name='aranya.TLSConfig.cert', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='key', full_name='aranya.TLSConfig.key', index=6,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cipher_suites', full_name='aranya.TLSConfig.cipher_suites', index=7,
number=8, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_protos', full_name='aranya.TLSConfig.next_protos', index=8,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=221,
)
_CONNECTIVITY_PARAMSENTRY = _descriptor.Descriptor(
name='ParamsEntry',
full_name='aranya.Connectivity.ParamsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='aranya.Connectivity.ParamsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='aranya.Connectivity.ParamsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=354,
serialized_end=399,
)
_CONNECTIVITY = _descriptor.Descriptor(
name='Connectivity',
full_name='aranya.Connectivity',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='method', full_name='aranya.Connectivity.method', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='target', full_name='aranya.Connectivity.target', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='params', full_name='aranya.Connectivity.params', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tls', full_name='aranya.Connectivity.tls', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_CONNECTIVITY_PARAMSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=224,
serialized_end=399,
)
_PERIPHERALOPERATION_PARAMSENTRY = _descriptor.Descriptor(
name='ParamsEntry',
full_name='aranya.PeripheralOperation.ParamsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='aranya.PeripheralOperation.ParamsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='aranya.PeripheralOperation.ParamsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=354,
serialized_end=399,
)
_PERIPHERALOPERATION = _descriptor.Descriptor(
name='PeripheralOperation',
full_name='aranya.PeripheralOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='operation_id', full_name='aranya.PeripheralOperation.operation_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='params', full_name='aranya.PeripheralOperation.params', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_PERIPHERALOPERATION_PARAMSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=402,
serialized_end=549,
)
_PERIPHERALMETRIC_PERIPHERALPARAMSENTRY = _descriptor.Descriptor(
name='PeripheralParamsEntry',
full_name='aranya.PeripheralMetric.PeripheralParamsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='aranya.PeripheralMetric.PeripheralParamsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='aranya.PeripheralMetric.PeripheralParamsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=873,
serialized_end=928,
)
_PERIPHERALMETRIC_REPORTERPARAMSENTRY = _descriptor.Descriptor(
name='ReporterParamsEntry',
full_name='aranya.PeripheralMetric.ReporterParamsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='aranya.PeripheralMetric.ReporterParamsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='aranya.PeripheralMetric.ReporterParamsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=930,
serialized_end=983,
)
_PERIPHERALMETRIC = _descriptor.Descriptor(
name='PeripheralMetric',
full_name='aranya.PeripheralMetric',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='aranya.PeripheralMetric.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='report_method', full_name='aranya.PeripheralMetric.report_method', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value_type', full_name='aranya.PeripheralMetric.value_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='peripheral_params', full_name='aranya.PeripheralMetric.peripheral_params', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reporter_name', full_name='aranya.PeripheralMetric.reporter_name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reporter_params', full_name='aranya.PeripheralMetric.reporter_params', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_PERIPHERALMETRIC_PERIPHERALPARAMSENTRY, _PERIPHERALMETRIC_REPORTERPARAMSENTRY, ],
enum_types=[
_PERIPHERALMETRIC_REPORTMETHOD,
_PERIPHERALMETRIC_VALUETYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=552,
serialized_end=1207,
)
_PERIPHERALENSURECMD = _descriptor.Descriptor(
name='PeripheralEnsureCmd',
full_name='aranya.PeripheralEnsureCmd',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='aranya.PeripheralEnsureCmd.kind', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='aranya.PeripheralEnsureCmd.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='connector', full_name='aranya.PeripheralEnsureCmd.connector', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='operations', full_name='aranya.PeripheralEnsureCmd.operations', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metrics', full_name='aranya.PeripheralEnsureCmd.metrics', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1210,
serialized_end=1416,
)
_PERIPHERALLISTCMD = _descriptor.Descriptor(
name='PeripheralListCmd',
full_name='aranya.PeripheralListCmd',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='peripheral_names', full_name='aranya.PeripheralListCmd.peripheral_names', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1418,
serialized_end=1463,
)
_PERIPHERALDELETECMD = _descriptor.Descriptor(
name='PeripheralDeleteCmd',
full_name='aranya.PeripheralDeleteCmd',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='peripheral_names', full_name='aranya.PeripheralDeleteCmd.peripheral_names', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1465,
serialized_end=1512,
)
_PERIPHERALOPERATECMD = _descriptor.Descriptor(
name='PeripheralOperateCmd',
full_name='aranya.PeripheralOperateCmd',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='peripheral_name', full_name='aranya.PeripheralOperateCmd.peripheral_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='operation_id', full_name='aranya.PeripheralOperateCmd.operation_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data', full_name='aranya.PeripheralOperateCmd.data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1514,
serialized_end=1597,
)
_PERIPHERALMETRICSCOLLECTCMD = _descriptor.Descriptor(
name='PeripheralMetricsCollectCmd',
full_name='aranya.PeripheralMetricsCollectCmd',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='peripheral_names', full_name='aranya.PeripheralMetricsCollectCmd.peripheral_names', index=0,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1599,
serialized_end=1654,
)
_PERIPHERALSTATUSMSG = _descriptor.Descriptor(
name='PeripheralStatusMsg',
full_name='aranya.PeripheralStatusMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='aranya.PeripheralStatusMsg.kind', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='aranya.PeripheralStatusMsg.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='aranya.PeripheralStatusMsg.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='message', full_name='aranya.PeripheralStatusMsg.message', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1657,
serialized_end=1787,
)
_PERIPHERALSTATUSLISTMSG = _descriptor.Descriptor(
name='PeripheralStatusListMsg',
full_name='aranya.PeripheralStatusListMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='peripherals', full_name='aranya.PeripheralStatusListMsg.peripherals', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1789,
serialized_end=1864,
)
# Machine-generated protobuf descriptor for aranya.PeripheralOperationResultMsg
# (repeated bytes payload). Regenerate with protoc rather than editing.
_PERIPHERALOPERATIONRESULTMSG = _descriptor.Descriptor(
  name='PeripheralOperationResultMsg',
  full_name='aranya.PeripheralOperationResultMsg',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='data', full_name='aranya.PeripheralOperationResultMsg.data', index=0,
      number=1, type=12, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1866,
  serialized_end=1910,
)
# --- Generated descriptor cross-linking ---
# Connect nested map entries, message-typed fields and enum-typed fields to
# their descriptors, then register everything with the symbol database.
_CONNECTIVITY_PARAMSENTRY.containing_type = _CONNECTIVITY
_CONNECTIVITY.fields_by_name['params'].message_type = _CONNECTIVITY_PARAMSENTRY
_CONNECTIVITY.fields_by_name['tls'].message_type = _TLSCONFIG
_PERIPHERALOPERATION_PARAMSENTRY.containing_type = _PERIPHERALOPERATION
_PERIPHERALOPERATION.fields_by_name['params'].message_type = _PERIPHERALOPERATION_PARAMSENTRY
_PERIPHERALMETRIC_PERIPHERALPARAMSENTRY.containing_type = _PERIPHERALMETRIC
_PERIPHERALMETRIC_REPORTERPARAMSENTRY.containing_type = _PERIPHERALMETRIC
_PERIPHERALMETRIC.fields_by_name['report_method'].enum_type = _PERIPHERALMETRIC_REPORTMETHOD
_PERIPHERALMETRIC.fields_by_name['value_type'].enum_type = _PERIPHERALMETRIC_VALUETYPE
_PERIPHERALMETRIC.fields_by_name['peripheral_params'].message_type = _PERIPHERALMETRIC_PERIPHERALPARAMSENTRY
_PERIPHERALMETRIC.fields_by_name['reporter_params'].message_type = _PERIPHERALMETRIC_REPORTERPARAMSENTRY
_PERIPHERALMETRIC_REPORTMETHOD.containing_type = _PERIPHERALMETRIC
_PERIPHERALMETRIC_VALUETYPE.containing_type = _PERIPHERALMETRIC
_PERIPHERALENSURECMD.fields_by_name['kind'].enum_type = _PERIPHERALTYPE
_PERIPHERALENSURECMD.fields_by_name['connector'].message_type = _CONNECTIVITY
_PERIPHERALENSURECMD.fields_by_name['operations'].message_type = _PERIPHERALOPERATION
_PERIPHERALENSURECMD.fields_by_name['metrics'].message_type = _PERIPHERALMETRIC
_PERIPHERALSTATUSMSG.fields_by_name['kind'].enum_type = _PERIPHERALTYPE
_PERIPHERALSTATUSMSG.fields_by_name['state'].enum_type = _PERIPHERALSTATE
_PERIPHERALSTATUSLISTMSG.fields_by_name['peripherals'].message_type = _PERIPHERALSTATUSMSG
# Register top-level message and enum types on the file descriptor.
DESCRIPTOR.message_types_by_name['TLSConfig'] = _TLSCONFIG
DESCRIPTOR.message_types_by_name['Connectivity'] = _CONNECTIVITY
DESCRIPTOR.message_types_by_name['PeripheralOperation'] = _PERIPHERALOPERATION
DESCRIPTOR.message_types_by_name['PeripheralMetric'] = _PERIPHERALMETRIC
DESCRIPTOR.message_types_by_name['PeripheralEnsureCmd'] = _PERIPHERALENSURECMD
DESCRIPTOR.message_types_by_name['PeripheralListCmd'] = _PERIPHERALLISTCMD
DESCRIPTOR.message_types_by_name['PeripheralDeleteCmd'] = _PERIPHERALDELETECMD
DESCRIPTOR.message_types_by_name['PeripheralOperateCmd'] = _PERIPHERALOPERATECMD
DESCRIPTOR.message_types_by_name['PeripheralMetricsCollectCmd'] = _PERIPHERALMETRICSCOLLECTCMD
DESCRIPTOR.message_types_by_name['PeripheralStatusMsg'] = _PERIPHERALSTATUSMSG
DESCRIPTOR.message_types_by_name['PeripheralStatusListMsg'] = _PERIPHERALSTATUSLISTMSG
DESCRIPTOR.message_types_by_name['PeripheralOperationResultMsg'] = _PERIPHERALOPERATIONRESULTMSG
DESCRIPTOR.enum_types_by_name['PeripheralType'] = _PERIPHERALTYPE
DESCRIPTOR.enum_types_by_name['PeripheralState'] = _PERIPHERALSTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- Generated message classes ---
# Build the concrete Python message classes from their descriptors via the
# protobuf reflection machinery and register each with the symbol database.
TLSConfig = _reflection.GeneratedProtocolMessageType('TLSConfig', (_message.Message,), {
  'DESCRIPTOR' : _TLSCONFIG,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.TLSConfig)
  })
_sym_db.RegisterMessage(TLSConfig)
Connectivity = _reflection.GeneratedProtocolMessageType('Connectivity', (_message.Message,), {
  'ParamsEntry' : _reflection.GeneratedProtocolMessageType('ParamsEntry', (_message.Message,), {
    'DESCRIPTOR' : _CONNECTIVITY_PARAMSENTRY,
    '__module__' : 'peripheral_pb2'
    # @@protoc_insertion_point(class_scope:aranya.Connectivity.ParamsEntry)
    })
  ,
  'DESCRIPTOR' : _CONNECTIVITY,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.Connectivity)
  })
_sym_db.RegisterMessage(Connectivity)
_sym_db.RegisterMessage(Connectivity.ParamsEntry)
PeripheralOperation = _reflection.GeneratedProtocolMessageType('PeripheralOperation', (_message.Message,), {
  'ParamsEntry' : _reflection.GeneratedProtocolMessageType('ParamsEntry', (_message.Message,), {
    'DESCRIPTOR' : _PERIPHERALOPERATION_PARAMSENTRY,
    '__module__' : 'peripheral_pb2'
    # @@protoc_insertion_point(class_scope:aranya.PeripheralOperation.ParamsEntry)
    })
  ,
  'DESCRIPTOR' : _PERIPHERALOPERATION,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.PeripheralOperation)
  })
_sym_db.RegisterMessage(PeripheralOperation)
_sym_db.RegisterMessage(PeripheralOperation.ParamsEntry)
PeripheralMetric = _reflection.GeneratedProtocolMessageType('PeripheralMetric', (_message.Message,), {
  'PeripheralParamsEntry' : _reflection.GeneratedProtocolMessageType('PeripheralParamsEntry', (_message.Message,), {
    'DESCRIPTOR' : _PERIPHERALMETRIC_PERIPHERALPARAMSENTRY,
    '__module__' : 'peripheral_pb2'
    # @@protoc_insertion_point(class_scope:aranya.PeripheralMetric.PeripheralParamsEntry)
    })
  ,
  'ReporterParamsEntry' : _reflection.GeneratedProtocolMessageType('ReporterParamsEntry', (_message.Message,), {
    'DESCRIPTOR' : _PERIPHERALMETRIC_REPORTERPARAMSENTRY,
    '__module__' : 'peripheral_pb2'
    # @@protoc_insertion_point(class_scope:aranya.PeripheralMetric.ReporterParamsEntry)
    })
  ,
  'DESCRIPTOR' : _PERIPHERALMETRIC,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.PeripheralMetric)
  })
_sym_db.RegisterMessage(PeripheralMetric)
_sym_db.RegisterMessage(PeripheralMetric.PeripheralParamsEntry)
_sym_db.RegisterMessage(PeripheralMetric.ReporterParamsEntry)
PeripheralEnsureCmd = _reflection.GeneratedProtocolMessageType('PeripheralEnsureCmd', (_message.Message,), {
  'DESCRIPTOR' : _PERIPHERALENSURECMD,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.PeripheralEnsureCmd)
  })
_sym_db.RegisterMessage(PeripheralEnsureCmd)
PeripheralListCmd = _reflection.GeneratedProtocolMessageType('PeripheralListCmd', (_message.Message,), {
  'DESCRIPTOR' : _PERIPHERALLISTCMD,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.PeripheralListCmd)
  })
_sym_db.RegisterMessage(PeripheralListCmd)
PeripheralDeleteCmd = _reflection.GeneratedProtocolMessageType('PeripheralDeleteCmd', (_message.Message,), {
  'DESCRIPTOR' : _PERIPHERALDELETECMD,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.PeripheralDeleteCmd)
  })
_sym_db.RegisterMessage(PeripheralDeleteCmd)
PeripheralOperateCmd = _reflection.GeneratedProtocolMessageType('PeripheralOperateCmd', (_message.Message,), {
  'DESCRIPTOR' : _PERIPHERALOPERATECMD,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.PeripheralOperateCmd)
  })
_sym_db.RegisterMessage(PeripheralOperateCmd)
PeripheralMetricsCollectCmd = _reflection.GeneratedProtocolMessageType('PeripheralMetricsCollectCmd', (_message.Message,), {
  'DESCRIPTOR' : _PERIPHERALMETRICSCOLLECTCMD,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.PeripheralMetricsCollectCmd)
  })
_sym_db.RegisterMessage(PeripheralMetricsCollectCmd)
PeripheralStatusMsg = _reflection.GeneratedProtocolMessageType('PeripheralStatusMsg', (_message.Message,), {
  'DESCRIPTOR' : _PERIPHERALSTATUSMSG,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.PeripheralStatusMsg)
  })
_sym_db.RegisterMessage(PeripheralStatusMsg)
PeripheralStatusListMsg = _reflection.GeneratedProtocolMessageType('PeripheralStatusListMsg', (_message.Message,), {
  'DESCRIPTOR' : _PERIPHERALSTATUSLISTMSG,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.PeripheralStatusListMsg)
  })
_sym_db.RegisterMessage(PeripheralStatusListMsg)
PeripheralOperationResultMsg = _reflection.GeneratedProtocolMessageType('PeripheralOperationResultMsg', (_message.Message,), {
  'DESCRIPTOR' : _PERIPHERALOPERATIONRESULTMSG,
  '__module__' : 'peripheral_pb2'
  # @@protoc_insertion_point(class_scope:aranya.PeripheralOperationResultMsg)
  })
_sym_db.RegisterMessage(PeripheralOperationResultMsg)
# Clear per-descriptor options (standard protoc epilogue for map entries).
DESCRIPTOR._options = None
_CONNECTIVITY_PARAMSENTRY._options = None
_PERIPHERALOPERATION_PARAMSENTRY._options = None
_PERIPHERALMETRIC_PERIPHERALPARAMSENTRY._options = None
_PERIPHERALMETRIC_REPORTERPARAMSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 42.176695 | 3,688 | 0.766797 |
f7b08cce1e8ed5b1493c31b841554b24b9583bb6 | 1,410 | py | Python | tests/test_version.py | maurelian/mythx-cli | 5da00777429a40f30cb2c1b4703f1b1560a91ecb | [
"MIT"
] | null | null | null | tests/test_version.py | maurelian/mythx-cli | 5da00777429a40f30cb2c1b4703f1b1560a91ecb | [
"MIT"
] | null | null | null | tests/test_version.py | maurelian/mythx-cli | 5da00777429a40f30cb2c1b4703f1b1560a91ecb | [
"MIT"
] | null | null | null | import json
from unittest.mock import patch
from click.testing import CliRunner
from mythx_models.response import VersionResponse
from mythx_cli.cli import cli
from .common import get_test_case, mock_context
VERSION_RESPONSE = get_test_case("testdata/version-response.json", VersionResponse)
VERSION_SIMPLE = get_test_case("testdata/version-simple.txt", raw=True)
VERSION_TABLE = get_test_case("testdata/version-table.txt", raw=True)
def test_version_tabular():
    """`mythx version` renders the default tabular output."""
    cli_runner = CliRunner()
    with mock_context():
        res = cli_runner.invoke(cli, ["version"])

        assert res.output == VERSION_TABLE
        assert res.exit_code == 0
def test_version_json():
    """`--format json version` emits the raw version response as JSON."""
    cli_runner = CliRunner()
    with mock_context():
        res = cli_runner.invoke(cli, ["--format", "json", "version"])

        assert json.loads(res.output) == VERSION_RESPONSE.to_dict()
        assert res.exit_code == 0
def test_version_json_pretty():
    """`--format json-pretty version` round-trips to the same payload."""
    cli_runner = CliRunner()
    with mock_context():
        res = cli_runner.invoke(cli, ["--format", "json-pretty", "version"])

        assert json.loads(res.output) == VERSION_RESPONSE.to_dict()
        assert res.exit_code == 0
def test_version_simple():
    """`--format simple version` matches the recorded plain-text fixture."""
    cli_runner = CliRunner()
    with mock_context():
        res = cli_runner.invoke(cli, ["--format", "simple", "version"])

        assert res.output == VERSION_SIMPLE
        assert res.exit_code == 0
| 28.2 | 83 | 0.695745 |
f43bc3e0cf4fa52507248fd6f4e6f2292501de73 | 5,201 | py | Python | sles/src/stack/images/common/sles-stacki.img-patches/opt/stack/bin/fix_fstab.py | knutsonchris/stacki | 33087dd5fa311984a66ccecfeee6f9c2c25f665d | [
"BSD-3-Clause"
] | 123 | 2015-05-12T23:36:45.000Z | 2017-07-05T23:26:57.000Z | sles/src/stack/images/common/sles-stacki.img-patches/opt/stack/bin/fix_fstab.py | knutsonchris/stacki | 33087dd5fa311984a66ccecfeee6f9c2c25f665d | [
"BSD-3-Clause"
] | 177 | 2015-06-05T19:17:47.000Z | 2017-07-07T17:57:24.000Z | sles/src/stack/images/common/sles-stacki.img-patches/opt/stack/bin/fix_fstab.py | knutsonchris/stacki | 33087dd5fa311984a66ccecfeee6f9c2c25f665d | [
"BSD-3-Clause"
] | 32 | 2015-06-07T02:25:03.000Z | 2017-06-23T07:35:35.000Z | #!/opt/stack/bin/python3 -E
#
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
"""
Called after autoyast does RPM installs.
Fixes the autoyast partitioning if nukedisks=False
Replaces UUID with LABEL then saves variable 'partitions_to_label' for fix_partition to use later
Merges the old fstab that contains unformatted existing partitions with the new fstab from yast.
Called Python with -E, as that's super subtle
"""
import sys
import subprocess
import os
import fileinput
import re
import shutil
try:
sys.path.append('/tmp')
from fstab_info import old_fstab
except ModuleNotFoundError:
# If the file isn't there to import then we didn't do a nukedisks=false with labels
sys.exit(0)
except ImportError:
sys.exit(0)
old_fstab_file = '/tmp/fstab_info/fstab'
new_fstab_file = '/mnt/etc/fstab'
tmp_fstab_file = '/tmp/fstab_info/tmp_fstab'
def get_host_partition_devices(detected_disks):
    """Return the partition device names found on disk *detected_disks*.

    Runs ``lsblk`` against ``/dev/<disk>`` and collects every block device
    name in the listing except the disk entry itself (i.e. its partitions).
    lsblk failures are ignored and simply produce an empty/partial result,
    matching the original best-effort behavior.
    """
    # subprocess.run replaces the old Popen/communicate pair; the stdin
    # pipe the old code opened was never used.
    proc = subprocess.run(
        ['lsblk', '-nrio', 'NAME', '/dev/%s' % detected_disks],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    devices = []
    for line in proc.stdout.decode().split('\n'):
        # Ignore empty lines
        if not line.strip():
            continue
        name = line.split()[0].strip()
        # Keep everything except the parent disk entry itself.  (A stale
        # comment previously claimed read-only/removable devices were
        # skipped here; the code never did that.)
        if name != detected_disks:
            devices.append(name)
    return devices
def get_host_fstab():
    """Parse the yast-generated fstab (``/mnt/etc/fstab``) if present.

    Returns a list of dicts with ``device``, ``mountpoint`` and ``fstype``
    keys, one per usable entry; comments and short lines are skipped.  An
    absent fstab yields an empty list.
    """
    host_fstab = []
    if not os.path.exists(new_fstab_file):
        return host_fstab

    with open(new_fstab_file) as fh:
        for raw_line in fh:
            # Strip any trailing comment before splitting into fields.
            fields = raw_line.split('#')[0].split()
            if len(fields) < 3:
                continue
            host_fstab.append({
                'device': fields[0].strip(),
                'mountpoint': fields[1].strip(),
                'fstype': fields[2].strip(),
            })
    return host_fstab
def get_existing_labels(yast_fstab, existing_fstab):
    """Find old fstab entries whose LABEL= device was replaced by autoyast.

    Compares the freshly generated (yast) fstab against the preserved one:
    any mountpoint previously addressed via LABEL= that is now addressed by
    some other identifier gets annotated with the new identifier
    (``new_uuid``) and filesystem type (``new_fstype``).  The dicts from
    *existing_fstab* are updated in place and the annotated ones returned.
    A changed filesystem type is reported and the entry excluded.
    """
    unlabeled_mounts = []
    replacement = {}
    for entry in yast_fstab:
        if 'label' in entry['device'].lower():
            continue
        unlabeled_mounts.append(entry['mountpoint'])
        replacement[entry['mountpoint']] = [entry['device'], entry['fstype']]

    annotated = []
    for entry in existing_fstab:
        if 'label' not in entry['device'].lower():
            continue
        if entry['mountpoint'] not in unlabeled_mounts:
            continue
        new_device, new_fstype = replacement[entry['mountpoint']]
        if entry['fstype'] != new_fstype:
            print("fstype changed during reinstall!")
        else:
            entry['new_uuid'] = new_device
            entry['new_fstype'] = new_fstype
            annotated.append(entry)
    return annotated
def edit_new_fstab(partitions_to_label):
    """Edit the /mnt/etc/fstab to replace the UUID= with LABEL=."""
    for partition in partitions_to_label:
        # A fully annotated entry has exactly 5 keys: device, mountpoint,
        # fstype, new_uuid and new_fstype (see get_existing_labels); anything
        # shorter was never matched and is skipped.
        if len(partition) == 5:
            find = partition['new_uuid']
            replace = partition['device']
            # fileinput with inplace=True redirects stdout into the file, so
            # each print() below rewrites one line of the fstab in place.
            with fileinput.FileInput(new_fstab_file, inplace=True) as fstab:
                for line in fstab:
                    if find in line:
                        print(line.replace(find, replace), end='')
                    # leave the line alone
                    else:
                        print(line, end='')
def edit_old_fstab(yast_fstab, existing_fstab):
    """Remove any partitions from the existing fstab if they exist in the yast generated fstab.

    We determine that they already exist by keying off the mount point."""
    new_mount_points = []
    for entry in yast_fstab:
        new_mount_points.append(entry['mountpoint'])
    for entry in existing_fstab:
        if entry['mountpoint'] in new_mount_points:
            # Anchored regex matching "<device>...<mountpoint>..."; both
            # fields are escaped so characters like '=' or '/' in device
            # names and paths are taken literally.
            remove = r'^' + re.escape(entry['device']) + r'.*' + re.escape(entry['mountpoint']) + r'.*'
            # remove = r'^' + entry['device'] + '.*' + entry['mountpoint'] + '.*'
            # fileinput inplace: printing a line keeps it, not printing drops it.
            with fileinput.FileInput(old_fstab_file, inplace=True) as fstab:
                for line in fstab:
                    if not re.search(remove, line):
                        print(line, end='')
def merge_fstabs():
    """Concatenate the new (yast) fstab and the trimmed old fstab.

    The combined contents are written to the temporary fstab path, new
    entries first, preserving bytes exactly.
    """
    with open(tmp_fstab_file, 'w+b') as merged:
        for source in (new_fstab_file, old_fstab_file):
            with open(source, 'rb') as part:
                shutil.copyfileobj(part, merged)
def main():
    """Main function."""
    # Gather the yast-generated fstab and work out which preserved LABEL=
    # entries it replaced with other identifiers (e.g. UUID=).
    new_fstab = get_host_fstab()
    partitions_to_label = get_existing_labels(new_fstab, old_fstab)
    # Drop duplicated mountpoints from the preserved fstab, restore LABEL=
    # devices in the new one, then splice the two files together and install
    # the merged result as /mnt/etc/fstab.
    edit_old_fstab(new_fstab, old_fstab)
    edit_new_fstab(partitions_to_label)
    merge_fstabs()
    shutil.copy(tmp_fstab_file, new_fstab_file)
    # Need output of the partitions_to_label to be utilized for post autoyast script.
    if not os.path.exists('/tmp/fstab_info'):
        os.makedirs('/tmp/fstab_info')
    with open('/tmp/fstab_info/__init__.py', 'a') as fstab_info:
        fstab_info.write('partitions_to_label = %s\n\n' % partitions_to_label)
if __name__ == "__main__":
main() | 31.331325 | 99 | 0.713132 |
a1975c2c10580e9fb9fd7b520c58766c458256b9 | 23,268 | py | Python | c7n/resources/sagemaker.py | yayitserica/cloud-custodian | de24142f6e70493acfbf9f468b414dd5cc492a76 | [
"Apache-2.0"
] | null | null | null | c7n/resources/sagemaker.py | yayitserica/cloud-custodian | de24142f6e70493acfbf9f468b414dd5cc492a76 | [
"Apache-2.0"
] | null | null | null | c7n/resources/sagemaker.py | yayitserica/cloud-custodian | de24142f6e70493acfbf9f468b414dd5cc492a76 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from c7n.actions import BaseAction
from c7n.exceptions import PolicyValidationError
from c7n.manager import resources
from c7n.query import QueryResourceManager, TypeInfo
from c7n.utils import local_session, type_schema
from c7n.tags import RemoveTag, Tag, TagActionFilter, TagDelayedAction
from c7n.filters.vpc import SubnetFilter, SecurityGroupFilter
@resources.register('sagemaker-notebook')
class NotebookInstance(QueryResourceManager):

    class resource_type(TypeInfo):
        service = 'sagemaker'
        enum_spec = ('list_notebook_instances', 'NotebookInstances', None)
        detail_spec = (
            'describe_notebook_instance', 'NotebookInstanceName',
            'NotebookInstanceName', None)
        arn = id = 'NotebookInstanceArn'
        name = 'NotebookInstanceName'
        date = 'CreationTime'

    permissions = ('sagemaker:ListTags',)

    def augment(self, resources):
        """Run the detail describe, then attach each instance's tags."""
        client = local_session(self.session_factory).client('sagemaker')
        detailed = super(NotebookInstance, self).augment(resources)
        for notebook in detailed:
            notebook['Tags'] = self.retry(
                client.list_tags,
                ResourceArn=notebook['NotebookInstanceArn'])['Tags']
        return detailed


NotebookInstance.filter_registry.register('marked-for-op', TagActionFilter)
@resources.register('sagemaker-job')
class SagemakerJob(QueryResourceManager):
    # Resource manager for SageMaker training jobs. By default only
    # in-progress jobs are enumerated (see __init__).

    class resource_type(TypeInfo):
        service = 'sagemaker'
        enum_spec = ('list_training_jobs', 'TrainingJobSummaries', None)
        detail_spec = (
            'describe_training_job', 'TrainingJobName', 'TrainingJobName', None)
        arn = id = 'TrainingJobArn'
        name = 'TrainingJobName'
        date = 'CreationTime'

    permissions = (
        'sagemaker:ListTrainingJobs', 'sagemaker:DescribeTrainingJobs',
        'sagemaker:ListTags')

    def __init__(self, ctx, data):
        super(SagemakerJob, self).__init__(ctx, data)
        # Normalize user-supplied `query` clauses; defaults to restricting
        # enumeration to jobs whose status is InProgress.
        self.queries = QueryFilter.parse(
            self.data.get('query', [
                {'StatusEquals': 'InProgress'}]))

    def resources(self, query=None):
        # Merge the parsed policy query clauses into the API enumeration
        # parameters (each clause is a single {FilterName: value} mapping).
        for q in self.queries:
            if q is None:
                continue
            query = query or {}
            for k, v in q.items():
                query[k] = v
        return super(SagemakerJob, self).resources(query=query)

    def augment(self, jobs):
        # After the detail describe, attach each job's tags via ListTags.
        client = local_session(self.session_factory).client('sagemaker')

        def _augment(j):
            tags = self.retry(client.list_tags,
                              ResourceArn=j['TrainingJobArn'])['Tags']
            j['Tags'] = tags
            return j

        jobs = super(SagemakerJob, self).augment(jobs)
        return list(map(_augment, jobs))
@resources.register('sagemaker-transform-job')
class SagemakerTransformJob(QueryResourceManager):
    # Resource manager for SageMaker batch transform jobs; mirrors
    # SagemakerJob and defaults to in-progress jobs only.

    class resource_type(TypeInfo):
        arn_type = "transform-job"
        service = 'sagemaker'
        enum_spec = ('list_transform_jobs', 'TransformJobSummaries', None)
        detail_spec = (
            'describe_transform_job', 'TransformJobName', 'TransformJobName', None)
        arn = id = 'TransformJobArn'
        name = 'TransformJobName'
        date = 'CreationTime'
        filter_name = 'TransformJobArn'

    permissions = (
        'sagemaker:ListTransformJobs', 'sagemaker:DescribeTransformJobs',
        'sagemaker:ListTags')

    def __init__(self, ctx, data):
        super(SagemakerTransformJob, self).__init__(ctx, data)
        # Normalize user-supplied `query` clauses (default: InProgress).
        self.queries = QueryFilter.parse(
            self.data.get('query', [
                {'StatusEquals': 'InProgress'}]))

    def resources(self, query=None):
        # Merge the parsed query clauses into the enumeration parameters.
        for q in self.queries:
            if q is None:
                continue
            query = query or {}
            for k, v in q.items():
                query[k] = v
        return super(SagemakerTransformJob, self).resources(query=query)

    def augment(self, jobs):
        # Attach each transform job's tags after the detail describe.
        client = local_session(self.session_factory).client('sagemaker')

        def _augment(j):
            tags = self.retry(client.list_tags,
                              ResourceArn=j['TransformJobArn'])['Tags']
            j['Tags'] = tags
            return j

        return list(map(_augment, super(SagemakerTransformJob, self).augment(jobs)))
class QueryFilter(object):
    """Validates and normalizes user supplied job query clauses.

    Policies may pass a list of single-key mappings under ``query``; each
    clause is validated against the known job filters and normalized to a
    ``{FilterName: value}`` mapping suitable for merging into the
    list_training_jobs / list_transform_jobs API parameters.
    """

    JOB_FILTERS = ('StatusEquals', 'NameContains',)

    @classmethod
    def parse(cls, data):
        """Validate *data* and return normalized query mappings.

        Duplicate filter names keep the first occurrence.  When no
        ``StatusEquals`` clause was supplied, a default of ``InProgress``
        is appended.
        """
        results = []
        names = set()

        for d in data:
            if not isinstance(d, dict):
                raise PolicyValidationError(
                    "Job Query Filter Invalid structure %s" % d)
            for k, v in d.items():
                if isinstance(v, list):
                    raise ValueError(
                        'Job query filter invalid structure %s' % v)
            query = cls(d).validate().query()
            if query['Name'] in names:
                # Cannot filter multiple times on the same key
                continue
            names.add(query['Name'])
            if isinstance(query['Value'], list):
                results.append({query['Name']: query['Value'][0]})
                continue
            results.append({query['Name']: query['Value']})

        if 'StatusEquals' not in names:
            # Bug fix: the default clause must use the same
            # {FilterName: value} shape as every other entry; the previous
            # {'Name': ..., 'Value': ...} form was merged into the API call
            # by the resource managers as bogus `Name`/`Value` parameters.
            results.append({'StatusEquals': 'InProgress'})
        return results

    def __init__(self, data):
        self.data = data
        self.key = None    # filter name, set by validate()
        self.value = None  # filter value, set by validate()

    def validate(self):
        """Ensure the clause is a single known filter with a value."""
        if not len(list(self.data.keys())) == 1:
            raise PolicyValidationError(
                "Job Query Filter Invalid %s" % self.data)

        self.key = list(self.data.keys())[0]
        self.value = list(self.data.values())[0]

        if self.key not in self.JOB_FILTERS and not self.key.startswith('tag:'):
            raise PolicyValidationError(
                "Job Query Filter invalid filter name %s" % (
                    self.data))

        if self.value is None:
            raise PolicyValidationError(
                "Job Query Filters must have a value, use tag-key"
                " w/ tag name as value for tag present checks"
                " %s" % self.data)
        return self

    def query(self):
        """Return the clause in ``{'Name': ..., 'Value': [...]}`` form."""
        value = self.value
        # py3: bare strings are wrapped in a list (was six.string_types).
        if isinstance(value, str):
            value = [value]
        return {'Name': self.key, 'Value': value}
@resources.register('sagemaker-endpoint')
class SagemakerEndpoint(QueryResourceManager):

    class resource_type(TypeInfo):
        service = 'sagemaker'
        enum_spec = ('list_endpoints', 'Endpoints', None)
        detail_spec = (
            'describe_endpoint', 'EndpointName',
            'EndpointName', None)
        arn = id = 'EndpointArn'
        name = 'EndpointName'
        date = 'CreationTime'

    permissions = ('sagemaker:ListTags',)

    def augment(self, endpoints):
        """Run the detail describe, then attach each endpoint's tags."""
        client = local_session(self.session_factory).client('sagemaker')
        detailed = super(SagemakerEndpoint, self).augment(endpoints)
        for endpoint in detailed:
            endpoint['Tags'] = self.retry(
                client.list_tags, ResourceArn=endpoint['EndpointArn'])['Tags']
        return detailed


SagemakerEndpoint.filter_registry.register('marked-for-op', TagActionFilter)
@resources.register('sagemaker-endpoint-config')
class SagemakerEndpointConfig(QueryResourceManager):

    class resource_type(TypeInfo):
        service = 'sagemaker'
        enum_spec = ('list_endpoint_configs', 'EndpointConfigs', None)
        detail_spec = (
            'describe_endpoint_config', 'EndpointConfigName',
            'EndpointConfigName', None)
        arn = id = 'EndpointConfigArn'
        name = 'EndpointConfigName'
        date = 'CreationTime'

    permissions = ('sagemaker:ListTags',)

    def augment(self, endpoints):
        # Attach tags to each described endpoint configuration.
        client = local_session(self.session_factory).client('sagemaker')

        def _augment(e):
            tags = self.retry(client.list_tags,
                              ResourceArn=e['EndpointConfigArn'])['Tags']
            e['Tags'] = tags
            return e

        endpoints = super(SagemakerEndpointConfig, self).augment(endpoints)
        return list(map(_augment, endpoints))


SagemakerEndpointConfig.filter_registry.register('marked-for-op', TagActionFilter)
@resources.register('sagemaker-model')
class Model(QueryResourceManager):

    class resource_type(TypeInfo):
        service = 'sagemaker'
        enum_spec = ('list_models', 'Models', None)
        detail_spec = (
            'describe_model', 'ModelName',
            'ModelName', None)
        arn = id = 'ModelArn'
        name = 'ModelName'
        date = 'CreationTime'

    permissions = ('sagemaker:ListTags',)

    def augment(self, resources):
        # Attach each model's tags.  Unlike the other sagemaker managers
        # this extends any pre-existing 'Tags' entry instead of overwriting
        # it, and does not call super().augment() -- NOTE(review): confirm
        # the detail describe is intentionally skipped here.
        client = local_session(self.session_factory).client('sagemaker')

        def _augment(r):
            tags = self.retry(client.list_tags,
                              ResourceArn=r['ModelArn'])['Tags']
            r.setdefault('Tags', []).extend(tags)
            return r

        return list(map(_augment, resources))


Model.filter_registry.register('marked-for-op', TagActionFilter)
class StateTransitionFilter(object):
    """Mixin restricting elements to notebook instances in allowed states.

    Simplifies policy authoring by automatically dropping instances whose
    lifecycle state is not valid for the attaching filter or action.
    """
    valid_origin_states = ()

    def filter_instance_state(self, instances, states=None):
        states = states or self.valid_origin_states
        before = len(instances)
        kept = [inst for inst in instances
                if inst['NotebookInstanceStatus'] in states]
        self.log.info("state filter %s %d of %d notebook instances" % (
            self.__class__.__name__, len(kept), before))
        return kept
@SagemakerEndpoint.action_registry.register('tag')
@SagemakerEndpointConfig.action_registry.register('tag')
@NotebookInstance.action_registry.register('tag')
@SagemakerJob.action_registry.register('tag')
@SagemakerTransformJob.action_registry.register('tag')
@Model.action_registry.register('tag')
class TagNotebookInstance(Tag):
    """Action to create tag(s) on a SageMaker resource
    (notebook-instance, endpoint, endpoint-config)

    :example:

    .. code-block:: yaml

        policies:
          - name: tag-sagemaker-notebook
            resource: sagemaker-notebook
            filters:
              - "tag:target-tag": absent
            actions:
              - type: tag
                key: target-tag
                value: target-value

          - name: tag-sagemaker-endpoint
            resource: sagemaker-endpoint
            filters:
              - "tag:required-tag": absent
            actions:
              - type: tag
                key: required-tag
                value: required-value

          - name: tag-sagemaker-endpoint-config
            resource: sagemaker-endpoint-config
            filters:
              - "tag:required-tag": absent
            actions:
              - type: tag
                key: required-tag
                value: required-value

          - name: tag-sagemaker-job
            resource: sagemaker-job
            filters:
              - "tag:required-tag": absent
            actions:
              - type: tag
                key: required-tag
                value: required-value
    """
    permissions = ('sagemaker:AddTags',)

    def process_resource_set(self, client, resources, tags):
        # Resolve the ARN field name for whichever resource type this
        # action is registered against (each manager defines its own id).
        mid = self.manager.resource_type.id
        for r in resources:
            client.add_tags(ResourceArn=r[mid], Tags=tags)
@SagemakerEndpoint.action_registry.register('remove-tag')
@SagemakerEndpointConfig.action_registry.register('remove-tag')
@NotebookInstance.action_registry.register('remove-tag')
@SagemakerJob.action_registry.register('remove-tag')
@SagemakerTransformJob.action_registry.register('remove-tag')
@Model.action_registry.register('remove-tag')
class RemoveTagNotebookInstance(RemoveTag):
    """Remove tag(s) from SageMaker resources
    (notebook-instance, endpoint, endpoint-config)

    :example:

    .. code-block:: yaml

        policies:
          - name: sagemaker-notebook-remove-tag
            resource: sagemaker-notebook
            filters:
              - "tag:BadTag": present
            actions:
              - type: remove-tag
                tags: ["BadTag"]

          - name: sagemaker-endpoint-remove-tag
            resource: sagemaker-endpoint
            filters:
              - "tag:expired-tag": present
            actions:
              - type: remove-tag
                tags: ["expired-tag"]

          - name: sagemaker-endpoint-config-remove-tag
            resource: sagemaker-endpoint-config
            filters:
              - "tag:expired-tag": present
            actions:
              - type: remove-tag
                tags: ["expired-tag"]

          - name: sagemaker-job-remove-tag
            resource: sagemaker-job
            filters:
              - "tag:expired-tag": present
            actions:
              - type: remove-tag
                tags: ["expired-tag"]
    """
    permissions = ('sagemaker:DeleteTags',)

    def process_resource_set(self, client, resources, keys):
        # id_key (set by the base action) names the resource's ARN field.
        for r in resources:
            client.delete_tags(ResourceArn=r[self.id_key], TagKeys=keys)
@SagemakerEndpoint.action_registry.register('mark-for-op')
@SagemakerEndpointConfig.action_registry.register('mark-for-op')
@NotebookInstance.action_registry.register('mark-for-op')
@Model.action_registry.register('mark-for-op')
class MarkNotebookInstanceForOp(TagDelayedAction):
    """Mark SageMaker resources for deferred action
    (notebook-instance, endpoint, endpoint-config)

    :example:

    .. code-block:: yaml

        policies:
          - name: sagemaker-notebook-invalid-tag-stop
            resource: sagemaker-notebook
            filters:
              - "tag:InvalidTag": present
            actions:
              - type: mark-for-op
                op: stop
                days: 1

          - name: sagemaker-endpoint-failure-delete
            resource: sagemaker-endpoint
            filters:
              - 'EndpointStatus': 'Failed'
            actions:
              - type: mark-for-op
                op: delete
                days: 1

          - name: sagemaker-endpoint-config-invalid-size-delete
            resource: sagemaker-endpoint-config
            filters:
              - type: value
                key: ProductionVariants[].InstanceType
                value: 'ml.m4.10xlarge'
                op: contains
            actions:
              - type: mark-for-op
                op: delete
                days: 1
    """
@NotebookInstance.action_registry.register('start')
class StartNotebookInstance(BaseAction, StateTransitionFilter):
    """Start sagemaker-notebook(s)

    Only notebook instances currently in the ``Stopped`` state are acted on.

    :example:

    .. code-block:: yaml

        policies:
          - name: start-sagemaker-notebook
            resource: sagemaker-notebook
            actions:
              - start
    """
    schema = type_schema('start')
    permissions = ('sagemaker:StartNotebookInstance',)
    valid_origin_states = ('Stopped',)

    def process(self, resources):
        candidates = self.filter_instance_state(resources)
        if not candidates:
            return
        client = local_session(self.manager.session_factory).client('sagemaker')

        for notebook in candidates:
            # Instance may have been removed between describe and action.
            try:
                client.start_notebook_instance(
                    NotebookInstanceName=notebook['NotebookInstanceName'])
            except client.exceptions.ResourceNotFound:
                continue
@NotebookInstance.action_registry.register('stop')
class StopNotebookInstance(BaseAction, StateTransitionFilter):
    """Stop sagemaker-notebook(s)

    Only notebook instances currently ``InService`` are acted on.

    :example:

    .. code-block:: yaml

        policies:
          - name: stop-sagemaker-notebook
            resource: sagemaker-notebook
            filters:
              - "tag:DeleteMe": present
            actions:
              - stop
    """
    schema = type_schema('stop')
    permissions = ('sagemaker:StopNotebookInstance',)
    valid_origin_states = ('InService',)

    def process(self, resources):
        candidates = self.filter_instance_state(resources)
        if not candidates:
            return
        client = local_session(self.manager.session_factory).client('sagemaker')

        for notebook in candidates:
            # Instance may have been removed between describe and action.
            try:
                client.stop_notebook_instance(
                    NotebookInstanceName=notebook['NotebookInstanceName'])
            except client.exceptions.ResourceNotFound:
                continue
@NotebookInstance.action_registry.register('delete')
class DeleteNotebookInstance(BaseAction, StateTransitionFilter):
    """Deletes sagemaker-notebook(s)

    Only notebook instances in the ``Stopped`` or ``Failed`` state are
    acted on.

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-sagemaker-notebook
            resource: sagemaker-notebook
            filters:
              - "tag:DeleteMe": present
            actions:
              - delete
    """
    schema = type_schema('delete')
    permissions = ('sagemaker:DeleteNotebookInstance',)
    valid_origin_states = ('Stopped', 'Failed',)

    def process(self, resources):
        candidates = self.filter_instance_state(resources)
        if not candidates:
            return
        client = local_session(self.manager.session_factory).client('sagemaker')

        for notebook in candidates:
            # Instance may have been removed between describe and action.
            try:
                client.delete_notebook_instance(
                    NotebookInstanceName=notebook['NotebookInstanceName'])
            except client.exceptions.ResourceNotFound:
                continue
@NotebookInstance.filter_registry.register('security-group')
class NotebookSecurityGroupFilter(SecurityGroupFilter):
    # Match notebook instances by their attached security group ids.
    RelatedIdsExpression = "SecurityGroups[]"
@NotebookInstance.filter_registry.register('subnet')
class NotebookSubnetFilter(SubnetFilter):
    # Match notebook instances by the subnet they are placed in.
    RelatedIdsExpression = "SubnetId"
@Model.action_registry.register('delete')
class DeleteModel(BaseAction, StateTransitionFilter):
    """Deletes sagemaker-model(s)

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-sagemaker-model
            resource: sagemaker-model
            filters:
              - "tag:DeleteMe": present
            actions:
              - delete
    """
    schema = type_schema('delete')
    permissions = ('sagemaker:DeleteModel',)

    def process(self, resources):
        # NOTE(review): StateTransitionFilter is inherited but
        # filter_instance_state is never invoked here -- models have no
        # lifecycle state to gate on; the mixin appears vestigial. Confirm
        # before removing it.
        client = local_session(self.manager.session_factory).client('sagemaker')

        for m in resources:
            try:
                client.delete_model(ModelName=m['ModelName'])
            except client.exceptions.ResourceNotFound:
                # Model already gone; deletion is treated as idempotent.
                pass
@SagemakerJob.action_registry.register('stop')
class SagemakerJobStop(BaseAction):
    """Stops a SageMaker job

    :example:

    .. code-block:: yaml

        policies:
          - name: stop-ml-job
            resource: sagemaker-job
            filters:
              - TrainingJobName: ml-job-10
            actions:
              - stop
    """
    schema = type_schema('stop')
    permissions = ('sagemaker:StopTrainingJob',)

    def process(self, jobs):
        client = local_session(self.manager.session_factory).client('sagemaker')

        for job in jobs:
            # Job may have completed or been removed since enumeration.
            try:
                client.stop_training_job(TrainingJobName=job['TrainingJobName'])
            except client.exceptions.ResourceNotFound:
                continue
@SagemakerEndpoint.action_registry.register('delete')
class SagemakerEndpointDelete(BaseAction):
    """Delete a SageMaker endpoint

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-sagemaker-endpoint
            resource: sagemaker-endpoint
            filters:
              - EndpointName: sagemaker-ep--2018-01-01-00-00-00
            actions:
              - type: delete
    """
    # DeleteEndpointConfig is declared alongside DeleteEndpoint even though
    # only delete_endpoint is called here; kept as-is to avoid changing the
    # policy's IAM surface.
    permissions = (
        'sagemaker:DeleteEndpoint',
        'sagemaker:DeleteEndpointConfig')
    schema = type_schema('delete')

    def process(self, endpoints):
        client = local_session(self.manager.session_factory).client('sagemaker')
        for endpoint in endpoints:
            name = endpoint['EndpointName']
            try:
                client.delete_endpoint(EndpointName=name)
            except client.exceptions.ResourceNotFound:
                # Endpoint already removed; idempotent success.
                continue
@SagemakerEndpointConfig.action_registry.register('delete')
class SagemakerEndpointConfigDelete(BaseAction):
    """Delete a SageMaker endpoint config

    (Docstring fix: this action operates on endpoint *configs*, not
    endpoints -- the resource and API call below make that explicit.)

    :example:

    .. code-block:: yaml

        policies:
          - name: delete-sagemaker-endpoint-config
            resource: sagemaker-endpoint-config
            filters:
              - EndpointConfigName: sagemaker-2018-01-01-00-00-00-T00
            actions:
              - delete
    """
    schema = type_schema('delete')
    permissions = ('sagemaker:DeleteEndpointConfig',)

    def process(self, endpoints):
        client = local_session(self.manager.session_factory).client('sagemaker')
        for e in endpoints:
            try:
                client.delete_endpoint_config(
                    EndpointConfigName=e['EndpointConfigName'])
            except client.exceptions.ResourceNotFound:
                # Config already deleted; treat the delete as idempotent.
                pass
@SagemakerTransformJob.action_registry.register('stop')
class SagemakerTransformJobStop(BaseAction):
    """Stops a SageMaker Transform job

    :example:

    .. code-block:: yaml

        policies:
          - name: stop-ml-job
            resource: sagemaker-transform-job
            filters:
              - TransformJobName: ml-job-10
            actions:
              - stop
    """
    schema = type_schema('stop')
    permissions = ('sagemaker:StopTransformJob',)

    def process(self, jobs):
        client = local_session(self.manager.session_factory).client('sagemaker')
        for job in jobs:
            name = job['TransformJobName']
            try:
                client.stop_transform_job(TransformJobName=name)
            except client.exceptions.ResourceNotFound:
                # Transform job already gone; nothing to stop.
                continue
| 31.443243 | 84 | 0.602372 |
b3a0c106706b65930f45c447a19da68a324d1756 | 37,570 | py | Python | MainWindow.py | 4RCAN3/PG | f00f727848d64681e6abbee24c70b2740c983a1d | [
"MIT"
] | 6 | 2021-04-01T06:18:18.000Z | 2022-01-31T07:29:02.000Z | MainWindow.py | 4RCAN3/PG | f00f727848d64681e6abbee24c70b2740c983a1d | [
"MIT"
] | null | null | null | MainWindow.py | 4RCAN3/PG | f00f727848d64681e6abbee24c70b2740c983a1d | [
"MIT"
] | 3 | 2021-03-24T12:08:24.000Z | 2021-03-24T14:06:54.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_PasswordGenerator(object):
    """UI definition for the password-generator main window.

    NOTE(review): this module was originally produced by pyuic5 (see the
    header warning).  The repetitive per-widget boilerplate has been
    collapsed into the data-driven tables below while preserving the
    original widget attribute names, creation (tab) order, raise_()
    stacking order, geometries, stylesheets and all user-visible text.
    If the window is ever regenerated from the .ui file these edits will
    be lost -- prefer making changes in the .ui source where possible.

    Fix: the original called QMetaObject.connectSlotsByName() twice,
    once before any widgets existed (a no-op) and once at the end of
    setupUi(); the redundant early call has been removed.
    """

    # Common white background shared by every form control.
    _WHITE = "background: rgb(255, 255, 255)"

    # Widget factories keyed by the short kind tag used in _WIDGETS.
    _FACTORIES = {
        'check': QtWidgets.QCheckBox,
        'spin': QtWidgets.QSpinBox,
        'text': QtWidgets.QTextEdit,
        'label': QtWidgets.QLabel,
        'button': QtWidgets.QPushButton,
    }

    # (kind, attribute name, geometry (x, y, w, h), bold heading font?).
    # The tuple order is the original generated creation order, which Qt
    # uses for the default tab/focus chain -- do not reorder casually.
    _WIDGETS = (
        ('check', 'IncludeLeetCheck', (40, 70, 13, 13), False),
        ('check', 'IncludeNumsSufCheck', (40, 370, 13, 13), False),
        ('label', 'DefaultListLabel', (160, 780, 161, 16), False),
        ('spin', 'MaxComboLenCheck', (40, 870, 42, 22), False),
        ('label', 'AddPrefixLabel', (160, 230, 61, 21), False),
        ('label', 'IncludeNumsLabel', (160, 740, 91, 16), False),
        ('text', 'CustomListText', (40, 910, 101, 31), False),
        ('label', 'CustomListLabel', (160, 910, 271, 31), False),
        ('spin', 'MinComboLenCheck', (40, 830, 42, 22), False),
        ('label', 'SuffixNumLenLabel', (160, 450, 151, 20), False),
        ('label', 'LeetCombinationsLabel', (160, 70, 131, 21), False),
        ('label', 'IncludeCombosLabel', (160, 310, 211, 16), False),
        ('label', 'MaxComboLenLabel', (160, 870, 101, 20), False),
        ('check', 'IncludeNumsCheck', (40, 740, 13, 13), False),
        ('label', 'GenWordListLabel', (160, 100, 111, 16), False),
        ('label', 'IncludeNumsSufLabel', (160, 370, 131, 20), False),
        ('label', 'PrefixSuffixSettingsLabel', (30, 190, 211, 21), True),
        ('text', 'CustomWordsText', (40, 130, 104, 31), False),
        ('label', 'WLSettingsLabel', (30, 660, 191, 51), True),
        ('label', 'PrefixNumLenLabel', (160, 410, 151, 20), False),
        ('check', 'IncludeCombosCheck', (40, 310, 13, 13), False),
        ('label', 'CustomWordsLabel', (160, 130, 231, 31), False),
        ('check', 'IncludeWLCheck', (40, 100, 13, 13), False),
        ('text', 'ExtendDefaultListText', (40, 960, 101, 31), False),
        ('button', 'GenerateButton', (90, 1220, 151, 61), False),
        ('label', 'MinComboLenLabel', (160, 820, 91, 20), False),
        ('text', 'AddPrefixText', (40, 220, 104, 31), False),
        ('text', 'AddSuffixText', (40, 270, 104, 31), False),
        ('label', 'IncludeNumsPreLabel', (160, 340, 131, 20), False),
        ('label', 'ExtandDefaultListLabel', (160, 960, 161, 31), False),
        ('spin', 'SuffixNumLenCheck', (40, 450, 42, 22), False),
        ('label', 'GenralSettingsLabel', (30, 20, 151, 31), True),
        ('spin', 'PrefixNumLenCheck', (40, 410, 42, 22), False),
        ('check', 'DefaultListCheck', (40, 780, 13, 13), False),
        ('check', 'IncludeNumsPreCheck', (40, 340, 13, 13), False),
        ('label', 'AddSuffixLabel', (160, 270, 71, 21), False),
        ('spin', 'MinPreComboLenCheck', (40, 490, 42, 22), False),
        ('spin', 'MinSufComboLenCheck', (40, 530, 42, 22), False),
        ('label', 'MinPreComboLenLabel', (160, 490, 151, 20), False),
        ('label', 'MinSufComboLenLabel', (160, 530, 151, 20), False),
        ('check', 'IncludeWLpreCheck', (40, 580, 13, 13), False),
        ('label', 'IncludeWLPrelabel', (160, 569, 141, 31), False),
        ('check', 'IncludeWLSufCheck', (40, 620, 13, 13), False),
        ('label', 'IncludeWLSufLabel', (160, 610, 141, 31), False),
        ('label', 'StoreComboLabel', (40, 1030, 191, 51), True),
        ('check', 'UseDefTxtFileCheck', (40, 1110, 13, 13), False),
        # Despite the "Check" suffix, this is a QTextEdit in the original.
        ('text', 'UseCustomTxtFileCheck', (40, 1150, 101, 31), False),
        ('label', 'UseDefTxtFileLabel', (160, 1100, 161, 31), False),
        ('label', 'UseCustomTxtFileLabel', (160, 1150, 161, 31), False),
        ('button', 'pushButton', (434, 20, 91, 31), True),
    )

    # Translatable user-visible strings applied in retranslateUi.
    # Strings are kept byte-for-byte from the generated code so that
    # translation lookups are unaffected.  NOTE(review): "Combinatons"
    # is a typo in the original and the trailing space in
    # "Use a custom text file " is original too -- fix both in the .ui
    # source, not here.
    _TEXTS = {
        'DefaultListLabel': "Use default alphabet list (abc...)",
        'AddPrefixLabel': "Add Prefix",
        'IncludeNumsLabel': "Include Numbers",
        'CustomListLabel': "Custom Characters (Overrides the default alphabet list)",
        'SuffixNumLenLabel': "Max. Combo Length for Suffix",
        'LeetCombinationsLabel': "Include Leet Combinatons",
        'IncludeCombosLabel': "Add combinations of prefixes and suffixes",
        'MaxComboLenLabel': "Max. Combo Length",
        'GenWordListLabel': "Generate a word list",
        'IncludeNumsSufLabel': "Add Numbers as suffix",
        'PrefixSuffixSettingsLabel': "Prefix and Suffix settings",
        'WLSettingsLabel': "Settings for word list",
        'PrefixNumLenLabel': "Max. Combo length for prefix",
        'CustomWordsLabel': "Custom word (Combinations for a custom word)",
        'GenerateButton': "Generate Combinations",
        'MinComboLenLabel': "Min. Combo length",
        'IncludeNumsPreLabel': "Add Numbers as prefix",
        'ExtandDefaultListLabel': "Extend the default alphabet list",
        'GenralSettingsLabel': "General Settings",
        'AddSuffixLabel': "Add Suffix",
        'MinPreComboLenLabel': "Min. Combo length for prefix",
        'MinSufComboLenLabel': "Min. Combo length for suffix",
        'IncludeWLPrelabel': "Use a wordlist as prefix",
        'IncludeWLSufLabel': "Use a wordlist as suffix",
        'StoreComboLabel': "Store Combinations",
        'UseDefTxtFileLabel': "Use the default text file",
        'UseCustomTxtFileLabel': "Use a custom text file ",
        'pushButton': "Help",
    }

    @staticmethod
    def _heading_font():
        """Return the 12pt bold font used for section headings and Help."""
        font = QtGui.QFont()
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        return font

    def _build_controls(self):
        """Instantiate every form control from the _WIDGETS table.

        Calls in the generated code that merely restated Qt defaults
        (setText(""), setChecked(False), setEnabled(True),
        setAutoFillBackground(False)) are intentionally omitted: they
        were no-ops.
        """
        for kind, name, geom, bold in self._WIDGETS:
            control = self._FACTORIES[kind](self.widget)
            control.setGeometry(QtCore.QRect(*geom))
            if bold:
                control.setFont(self._heading_font())
            control.setStyleSheet(self._WHITE)
            if kind == 'label':
                # Every label in the generated code enabled word wrap.
                control.setWordWrap(True)
            control.setObjectName(name)
            setattr(self, name, control)

    def _restack_controls(self):
        """Reproduce the raise_() stacking order of the generated code.

        That order equals creation order except that GenralSettingsLabel
        was raised immediately after AddSuffixLabel.
        """
        order = [name for _, name, _, _ in self._WIDGETS]
        order.remove('GenralSettingsLabel')
        order.insert(order.index('AddSuffixLabel') + 1, 'GenralSettingsLabel')
        for name in order:
            getattr(self, name).raise_()

    def setupUi(self, PasswordGenerator):
        """Build the full widget tree on the given QMainWindow.

        :param PasswordGenerator: the QMainWindow instance to populate.
        """
        PasswordGenerator.setObjectName("PasswordGenerator")
        PasswordGenerator.resize(569, 504)
        PasswordGenerator.setAutoFillBackground(True)
        # --- scrollable container hierarchy ------------------------------
        self.centralwidget = QtWidgets.QWidget(PasswordGenerator)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.scrollArea = QtWidgets.QScrollArea(self.centralwidget)
        self.scrollArea.setStyleSheet(
            "background: qlineargradient(spread:pad, x1:0.006, y1:0, x2:0.745, y2:0, "
            "stop:0 rgba(0, 0, 0, 255), stop:1 rgba(0, 0, 127, 255))")
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName("scrollArea")
        self.scrollAreaWidgetContents_9 = QtWidgets.QWidget()
        self.scrollAreaWidgetContents_9.setGeometry(QtCore.QRect(-26, 0, 618, 1318))
        self.scrollAreaWidgetContents_9.setObjectName("scrollAreaWidgetContents_9")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_9)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.frame = QtWidgets.QFrame(self.scrollAreaWidgetContents_9)
        self.frame.setMinimumSize(QtCore.QSize(600, 1300))
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        # Oversized inner canvas holding all of the form controls.
        self.widget = QtWidgets.QWidget(self.frame)
        self.widget.setGeometry(QtCore.QRect(-11, -1, 1801, 3000))
        self.widget.setMinimumSize(QtCore.QSize(1800, 1800))
        self.widget.setAutoFillBackground(False)
        self.widget.setObjectName("widget")
        # --- form controls (data-driven) ---------------------------------
        self._build_controls()
        self._restack_controls()
        # --- outer layout and window chrome ------------------------------
        self.verticalLayout_2.addWidget(self.frame)
        self.scrollArea.setWidget(self.scrollAreaWidgetContents_9)
        self.verticalLayout.addWidget(self.scrollArea)
        PasswordGenerator.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(PasswordGenerator)
        self.statusbar.setObjectName("statusbar")
        PasswordGenerator.setStatusBar(self.statusbar)
        self.menubar = QtWidgets.QMenuBar(PasswordGenerator)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 569, 21))
        self.menubar.setObjectName("menubar")
        PasswordGenerator.setMenuBar(self.menubar)
        self.retranslateUi(PasswordGenerator)
        QtCore.QMetaObject.connectSlotsByName(PasswordGenerator)

    def retranslateUi(self, PasswordGenerator):
        """Apply all user-visible (translatable) strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        PasswordGenerator.setWindowTitle(_translate("PasswordGenerator", "MainWindow"))
        for name, text in self._TEXTS.items():
            getattr(self, name).setText(_translate("PasswordGenerator", text))
| 39.176225 | 168 | 0.635108 |
b7249f300faa7ec5b75bbdd866667020d98c3c31 | 82,743 | py | Python | tests/admin_inlines/tests.py | KaushikSathvara/django | 3b9fe906bf28d2e748ce4d9a1af5fbcd5df48946 | [
"BSD-3-Clause",
"0BSD"
] | 1 | 2021-11-07T12:42:43.000Z | 2021-11-07T12:42:43.000Z | tests/admin_inlines/tests.py | KaushikSathvara/django | 3b9fe906bf28d2e748ce4d9a1af5fbcd5df48946 | [
"BSD-3-Clause",
"0BSD"
] | 3 | 2022-02-01T12:12:02.000Z | 2022-02-01T12:27:06.000Z | tests/admin_inlines/tests.py | KaushikSathvara/django | 3b9fe906bf28d2e748ce4d9a1af5fbcd5df48946 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from .admin import InnerInline, site as admin_site
from .models import (
Author, BinaryTree, Book, BothVerboseNameProfile, Chapter, Child,
ChildModel1, ChildModel2, Fashionista, FootNote, Holder, Holder2, Holder3,
Holder4, Inner, Inner2, Inner3, Inner4Stacked, Inner4Tabular, Novel,
OutfitItem, Parent, ParentModelWithCustomPk, Person, Poll, Profile,
ProfileCollection, Question, ShowInlineParent, Sighting, SomeChildModel,
SomeParentModel, Teacher, VerboseNamePluralProfile, VerboseNameProfile,
)
# HTML fragment rendered by inlines with show_change_link enabled; tests use
# it with assertContains/assertNotContains to detect the "Change" link.
INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'
class TestDataMixin:
    """Provide a shared superuser fixture for the admin-inline test cases."""
    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', email='super@example.com', password='secret')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInline(TestDataMixin, TestCase):
    """
    Rendering and submission behavior of stacked/tabular admin inlines:
    headings, labels, help texts, hidden fields, change links, form counts
    (extra/min_num/max_num), and primary-key handling.
    """
    factory = RequestFactory()
    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.holder = Holder.objects.create(dummy=13)
        Inner.objects.create(dummy=42, holder=cls.holder)
        cls.parent = SomeParentModel.objects.create(name='a')
        SomeChildModel.objects.create(name='b', position='0', parent=cls.parent)
        SomeChildModel.objects.create(name='c', position='1', parent=cls.parent)
        # A staff user restricted to view-only permissions on the
        # parent/child models, used by the hidden-field permission tests.
        cls.view_only_user = User.objects.create_user(
            username='user', password='pwd', is_staff=True,
        )
        parent_ct = ContentType.objects.get_for_model(SomeParentModel)
        child_ct = ContentType.objects.get_for_model(SomeChildModel)
        permission = Permission.objects.get(
            codename='view_someparentmodel',
            content_type=parent_ct,
        )
        cls.view_only_user.user_permissions.add(permission)
        permission = Permission.objects.get(
            codename='view_somechildmodel',
            content_type=child_ct,
        )
        cls.view_only_user.user_permissions.add(permission)
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_can_delete(self):
        """
        can_delete should be passed to inlineformset factory.
        """
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(self.holder.id,))
        )
        inner_formset = response.context['inline_admin_formsets'][0].formset
        expected = InnerInline.can_delete
        actual = inner_formset.can_delete
        self.assertEqual(expected, actual, 'can_delete must be equal')
    def test_readonly_stacked_inline_label(self):
        """Bug #13174."""
        holder = Holder.objects.create(dummy=42)
        Inner.objects.create(holder=holder, dummy=42, readonly='')
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        )
        self.assertContains(response, '<label>Inner readonly label:</label>')
    def test_many_to_many_inlines(self):
        "Autogenerated many-to-many inlines are displayed correctly (#13407)"
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # The heading for the m2m inline block uses the right text
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        # The "add another" label is correct
        self.assertContains(response, 'Add another Author-book relationship')
        # The '+' is dropped from the autogenerated form prefix (Author_books+)
        self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
    def test_inline_primary(self):
        """An inline row whose model has a OneToOne primary key can be saved."""
        person = Person.objects.create(firstname='Imelda')
        item = OutfitItem.objects.create(name='Shoes')
        # Imelda likes shoes, but can't carry her own bags.
        data = {
            'shoppingweakness_set-TOTAL_FORMS': 1,
            'shoppingweakness_set-INITIAL_FORMS': 0,
            'shoppingweakness_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'person': person.id,
            'max_weight': 0,
            'shoppingweakness_set-0-item': item.id,
        }
        response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
    def test_tabular_inline_column_css_class(self):
        """
        Field names are included in the context to output a field-specific
        CSS class name in the column headers.
        """
        response = self.client.get(reverse('admin:admin_inlines_poll_add'))
        text_field, call_me_field = list(response.context['inline_admin_formset'].fields())
        # Editable field.
        self.assertEqual(text_field['name'], 'text')
        self.assertContains(response, '<th class="column-text required">')
        # Read-only field.
        self.assertEqual(call_me_field['name'], 'call_me')
        self.assertContains(response, '<th class="column-call_me">')
    def test_custom_form_tabular_inline_label(self):
        """
        A model form with a form field specified (TitleForm.title1) should have
        its label rendered in the tabular inline.
        """
        response = self.client.get(reverse('admin:admin_inlines_titlecollection_add'))
        self.assertContains(response, '<th class="column-title1 required">Title1</th>', html=True)
    def test_custom_form_tabular_inline_extra_field_label(self):
        """An extra form field declared on the inline form gets its label in the context."""
        response = self.client.get(reverse('admin:admin_inlines_outfititem_add'))
        _, extra_field = list(response.context['inline_admin_formset'].fields())
        self.assertEqual(extra_field['label'], 'Extra field')
    def test_non_editable_custom_form_tabular_inline_extra_field_label(self):
        """Extra form field labels are rendered even when the inline isn't editable."""
        response = self.client.get(reverse('admin:admin_inlines_chapter_add'))
        _, extra_field = list(response.context['inline_admin_formset'].fields())
        self.assertEqual(extra_field['label'], 'Extra field')
    def test_custom_form_tabular_inline_overridden_label(self):
        """
        SomeChildModelForm.__init__() overrides the label of a form field.
        That label is displayed in the TabularInline.
        """
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add'))
        field = list(response.context['inline_admin_formset'].fields())[0]
        self.assertEqual(field['label'], 'new label')
        self.assertContains(response, '<th class="column-name required">New label</th>', html=True)
    def test_tabular_non_field_errors(self):
        """
        non_field_errors are displayed correctly, including the correct value
        for colspan.
        """
        data = {
            'title_set-TOTAL_FORMS': 1,
            'title_set-INITIAL_FORMS': 0,
            'title_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'title_set-0-title1': 'a title',
            'title_set-0-title2': 'a different title',
        }
        response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data)
        # Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
        self.assertContains(
            response,
            '<tr class="row-form-errors"><td colspan="4"><ul class="errorlist nonfield">'
            '<li>The two titles must be the same</li></ul></td></tr>'
        )
    def test_no_parent_callable_lookup(self):
        """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
        # Identically named callable isn't present in the parent ModelAdmin,
        # rendering of the add view shouldn't explode
        response = self.client.get(reverse('admin:admin_inlines_novel_add'))
        # View should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="chapter_set-group"'
        )
    def test_callable_lookup(self):
        """Admin inline should invoke local callable when its name is listed in readonly_fields"""
        response = self.client.get(reverse('admin:admin_inlines_poll_add'))
        # Add parent object view should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="question_set-group"'
        )
        # The right callable should be used for the inline readonly_fields
        # column cells
        self.assertContains(response, '<p>Callable in QuestionInline</p>')
    def test_help_text(self):
        """
        The inlines' model field help texts are displayed when using both the
        stacked and tabular layouts.
        """
        response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
        self.assertContains(response, '<div class="help">Awesome stacked help text is awesome.</div>', 4)
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Awesome tabular help text is awesome.)" '
            'title="Awesome tabular help text is awesome.">',
            1
        )
        # ReadOnly fields
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Help text for ReadOnlyInline)" '
            'title="Help text for ReadOnlyInline">',
            1
        )
    def test_tabular_model_form_meta_readonly_field(self):
        """
        Tabular inlines use ModelForm.Meta.help_texts and labels for read-only
        fields.
        """
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add'))
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Help text from ModelForm.Meta)" '
            'title="Help text from ModelForm.Meta">'
        )
        self.assertContains(response, 'Label from ModelForm.Meta')
    def test_inline_hidden_field_no_column(self):
        """#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
        parent = SomeParentModel.objects.create(name='a')
        SomeChildModel.objects.create(name='b', position='0', parent=parent)
        SomeChildModel.objects.create(name='c', position='1', parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,)))
        self.assertNotContains(response, '<td class="field-position">')
        self.assertInHTML(
            '<input id="id_somechildmodel_set-1-position" '
            'name="somechildmodel_set-1-position" type="hidden" value="1">',
            response.rendered_content,
        )
    def test_tabular_inline_hidden_field_with_view_only_permissions(self):
        """
        Content of hidden field is not visible in tabular inline when user has
        view-only permission.
        """
        self.client.force_login(self.view_only_user)
        url = reverse(
            'tabular_inline_hidden_field_admin:admin_inlines_someparentmodel_change',
            args=(self.parent.pk,),
        )
        response = self.client.get(url)
        self.assertInHTML('<th class="column-position hidden">Position</th>', response.rendered_content)
        self.assertInHTML('<td class="field-position hidden"><p>0</p></td>', response.rendered_content)
        self.assertInHTML('<td class="field-position hidden"><p>1</p></td>', response.rendered_content)
    def test_stacked_inline_hidden_field_with_view_only_permissions(self):
        """
        Content of hidden field is not visible in stacked inline when user has
        view-only permission.
        """
        self.client.force_login(self.view_only_user)
        url = reverse(
            'stacked_inline_hidden_field_in_group_admin:admin_inlines_someparentmodel_change',
            args=(self.parent.pk,),
        )
        response = self.client.get(url)
        # The whole line containing name + position fields is not hidden.
        self.assertContains(response, '<div class="form-row field-name field-position">')
        # The div containing the position field is hidden.
        self.assertInHTML(
            '<div class="fieldBox field-position hidden">'
            '<label class="inline">Position:</label>'
            '<div class="readonly">0</div></div>',
            response.rendered_content,
        )
        self.assertInHTML(
            '<div class="fieldBox field-position hidden">'
            '<label class="inline">Position:</label>'
            '<div class="readonly">1</div></div>',
            response.rendered_content,
        )
    def test_stacked_inline_single_hidden_field_in_line_with_view_only_permissions(self):
        """
        Content of hidden field is not visible in stacked inline when user has
        view-only permission and the field is grouped on a separate line.
        """
        self.client.force_login(self.view_only_user)
        url = reverse(
            'stacked_inline_hidden_field_on_single_line_admin:admin_inlines_someparentmodel_change',
            args=(self.parent.pk,),
        )
        response = self.client.get(url)
        # The whole line containing position field is hidden.
        self.assertInHTML(
            '<div class="form-row hidden field-position">'
            '<div><label>Position:</label>'
            '<div class="readonly">0</div></div></div>',
            response.rendered_content,
        )
        self.assertInHTML(
            '<div class="form-row hidden field-position">'
            '<div><label>Position:</label>'
            '<div class="readonly">1</div></div></div>',
            response.rendered_content,
        )
    def test_tabular_inline_with_hidden_field_non_field_errors_has_correct_colspan(self):
        """
        In tabular inlines, when a form has non-field errors, those errors
        are rendered in a table line with a single cell spanning the whole
        table width. Colspan must be equal to the number of visible columns.
        """
        parent = SomeParentModel.objects.create(name='a')
        child = SomeChildModel.objects.create(name='b', position='0', parent=parent)
        url = reverse(
            'tabular_inline_hidden_field_admin:admin_inlines_someparentmodel_change',
            args=(parent.id,),
        )
        data = {
            'name': parent.name,
            'somechildmodel_set-TOTAL_FORMS': 1,
            'somechildmodel_set-INITIAL_FORMS': 1,
            'somechildmodel_set-MIN_NUM_FORMS': 0,
            'somechildmodel_set-MAX_NUM_FORMS': 1000,
            '_save': 'Save',
            'somechildmodel_set-0-id': child.id,
            'somechildmodel_set-0-parent': parent.id,
            'somechildmodel_set-0-name': child.name,
            'somechildmodel_set-0-position': 1,
        }
        response = self.client.post(url, data)
        # Form has 3 visible columns and 1 hidden column.
        self.assertInHTML(
            '<thead><tr><th class="original"></th>'
            '<th class="column-name required">Name</th>'
            '<th class="column-position required hidden">Position</th>'
            '<th>Delete?</th></tr></thead>',
            response.rendered_content,
        )
        # The non-field error must be spanned on 3 (visible) columns.
        self.assertInHTML(
            '<tr class="row-form-errors"><td colspan="3">'
            '<ul class="errorlist nonfield"><li>A non-field error</li></ul></td></tr>',
            response.rendered_content,
        )
    def test_non_related_name_inline(self):
        """
        Multiple inlines with related_name='+' have correct form prefixes.
        """
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id">', html=True)
        self.assertContains(
            response,
            '<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" maxlength="100">',
            html=True
        )
        self.assertContains(response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id">', html=True)
        self.assertContains(
            response,
            '<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" maxlength="100">',
            html=True
        )
    @override_settings(USE_THOUSAND_SEPARATOR=True)
    def test_localize_pk_shortcut(self):
        """
        The "View on Site" link is correct for locales that use thousand
        separators.
        """
        holder = Holder.objects.create(pk=123456789, dummy=42)
        inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
        response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.id,)))
        inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
        self.assertContains(response, inner_shortcut)
    def test_custom_pk_shortcut(self):
        """
        The "View on Site" link is correct for models with a custom primary key
        field.
        """
        parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
        child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
        child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
        child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
        child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
        self.assertContains(response, child1_shortcut)
        self.assertContains(response, child2_shortcut)
    def test_create_inlines_on_inherited_model(self):
        """
        An object can be created with inlines when it inherits another class.
        """
        data = {
            'name': 'Martian',
            'sighting_set-TOTAL_FORMS': 1,
            'sighting_set-INITIAL_FORMS': 0,
            'sighting_set-MAX_NUM_FORMS': 0,
            'sighting_set-0-place': 'Zone 51',
            '_save': 'Save',
        }
        response = self.client.post(reverse('admin:admin_inlines_extraterrestrial_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
    def test_custom_get_extra_form(self):
        """get_max_num() on the inline limits MAX_NUM_FORMS per object state."""
        bt_head = BinaryTree.objects.create(name="Tree Head")
        BinaryTree.objects.create(name="First Child", parent=bt_head)
        # The maximum number of forms should respect 'get_max_num' on the
        # ModelAdmin
        max_forms_input = (
            '<input id="id_binarytree_set-MAX_NUM_FORMS" '
            'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d">'
        )
        # The total number of forms will remain the same in either case
        total_forms_hidden = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2">'
        )
        response = self.client.get(reverse('admin:admin_inlines_binarytree_add'))
        self.assertInHTML(max_forms_input % 3, response.rendered_content)
        self.assertInHTML(total_forms_hidden, response.rendered_content)
        response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
        self.assertInHTML(max_forms_input % 2, response.rendered_content)
        self.assertInHTML(total_forms_hidden, response.rendered_content)
    def test_min_num(self):
        """
        min_num and extra determine number of forms.
        """
        class MinNumInline(TabularInline):
            model = BinaryTree
            min_num = 2
            extra = 3
        modeladmin = ModelAdmin(BinaryTree, admin_site)
        modeladmin.inlines = [MinNumInline]
        min_forms = (
            '<input id="id_binarytree_set-MIN_NUM_FORMS" '
            'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2">'
        )
        total_forms = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5">'
        )
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request)
        self.assertInHTML(min_forms, response.rendered_content)
        self.assertInHTML(total_forms, response.rendered_content)
    def test_custom_min_num(self):
        """get_min_num() on the inline determines MIN_NUM_FORMS per object state."""
        bt_head = BinaryTree.objects.create(name="Tree Head")
        BinaryTree.objects.create(name="First Child", parent=bt_head)
        class MinNumInline(TabularInline):
            model = BinaryTree
            extra = 3
            def get_min_num(self, request, obj=None, **kwargs):
                if obj:
                    return 5
                return 2
        modeladmin = ModelAdmin(BinaryTree, admin_site)
        modeladmin.inlines = [MinNumInline]
        min_forms = (
            '<input id="id_binarytree_set-MIN_NUM_FORMS" '
            'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d">'
        )
        total_forms = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d">'
        )
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request)
        self.assertInHTML(min_forms % 2, response.rendered_content)
        self.assertInHTML(total_forms % 5, response.rendered_content)
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
        self.assertInHTML(min_forms % 5, response.rendered_content)
        self.assertInHTML(total_forms % 8, response.rendered_content)
    def test_inline_nonauto_noneditable_pk(self):
        """A non-AutoField, non-editable pk is rendered as a hidden input."""
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input id="id_nonautopkbook_set-0-rand_pk" '
            'name="nonautopkbook_set-0-rand_pk" type="hidden">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_nonautopkbook_set-2-0-rand_pk" '
            'name="nonautopkbook_set-2-0-rand_pk" type="hidden">',
            html=True
        )
    def test_inline_nonauto_noneditable_inherited_pk(self):
        """An inherited parent-link pk is rendered as a hidden input."""
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input id="id_nonautopkbookchild_set-0-nonautopkbook_ptr" '
            'name="nonautopkbookchild_set-0-nonautopkbook_ptr" type="hidden">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_nonautopkbookchild_set-2-nonautopkbook_ptr" '
            'name="nonautopkbookchild_set-2-nonautopkbook_ptr" type="hidden">',
            html=True
        )
    def test_inline_editable_pk(self):
        """An editable pk field is rendered as a visible number input."""
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
            'name="editablepkbook_set-0-manual_pk" type="number">',
            html=True, count=1
        )
        self.assertContains(
            response,
            '<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
            'name="editablepkbook_set-2-0-manual_pk" type="number">',
            html=True, count=1
        )
    def test_stacked_inline_edit_form_contains_has_original_class(self):
        """Existing inline rows get the has_original CSS class; extra rows don't."""
        holder = Holder.objects.create(dummy=1)
        holder.inner_set.create(dummy=1)
        response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.pk,)))
        self.assertContains(
            response,
            '<div class="inline-related has_original" id="inner_set-0">',
            count=1
        )
        self.assertContains(
            response,
            '<div class="inline-related" id="inner_set-1">',
            count=1
        )
    def test_inlines_show_change_link_registered(self):
        "Inlines `show_change_link` for registered models when enabled."
        holder = Holder4.objects.create(dummy=1)
        item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
        item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
        items = (
            ('inner4stacked', item1.pk),
            ('inner4tabular', item2.pk),
        )
        response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,)))
        self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
        for model, pk in items:
            url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,))
            self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML))
    def test_inlines_show_change_link_unregistered(self):
        "Inlines `show_change_link` disabled for unregistered models."
        parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
        ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
        ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
        self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model)
        self.assertNotContains(response, INLINE_CHANGELINK_HTML)
    def test_tabular_inline_show_change_link_false_registered(self):
        "Inlines `show_change_link` disabled by default."
        poll = Poll.objects.create(name="New poll")
        Question.objects.create(poll=poll)
        response = self.client.get(reverse('admin:admin_inlines_poll_change', args=(poll.pk,)))
        self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
        self.assertNotContains(response, INLINE_CHANGELINK_HTML)
    def test_noneditable_inline_has_field_inputs(self):
        """Inlines without change permission shows field inputs on add form."""
        response = self.client.get(reverse('admin:admin_inlines_novelreadonlychapter_add'))
        self.assertContains(
            response,
            '<input type="text" name="chapter_set-0-name" '
            'class="vTextField" maxlength="40" id="id_chapter_set-0-name">',
            html=True
        )
    def test_inlines_plural_heading_foreign_key(self):
        """ForeignKey inlines use the plural verbose name as their heading."""
        response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
        self.assertContains(response, '<h2>Inner4 stackeds</h2>', html=True)
        self.assertContains(response, '<h2>Inner4 tabulars</h2>', html=True)
    def test_inlines_singular_heading_one_to_one(self):
        """OneToOne inlines use the singular verbose name as their heading."""
        response = self.client.get(reverse('admin:admin_inlines_person_add'))
        self.assertContains(response, '<h2>Author</h2>', html=True)  # Tabular.
        self.assertContains(response, '<h2>Fashionista</h2>', html=True)  # Stacked.
    def test_inlines_based_on_model_state(self):
        """A change form whose inlines depend on model state can still be saved."""
        parent = ShowInlineParent.objects.create(show_inlines=False)
        data = {
            'show_inlines': 'on',
            '_save': 'Save',
        }
        change_url = reverse(
            'admin:admin_inlines_showinlineparent_change',
            args=(parent.id,),
        )
        response = self.client.post(change_url, data)
        self.assertEqual(response.status_code, 302)
        parent.refresh_from_db()
        self.assertIs(parent.show_inlines, True)
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineMedia(TestDataMixin, TestCase):
    """Media (JS) declared on the parent ModelAdmin and/or its inlines is
    collected and rendered on the change form."""
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_inline_media_only_base(self):
        """Media declared only on the parent ModelAdmin appears on the page."""
        holder = Holder(dummy=13)
        holder.save()
        Inner(dummy=42, holder=holder).save()
        change_url = reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        response = self.client.get(change_url)
        self.assertContains(response, 'my_awesome_admin_scripts.js')
    def test_inline_media_only_inline(self):
        """Media declared only on the inline is merged (in order) with the
        admin's own inline scripts."""
        holder = Holder3(dummy=13)
        holder.save()
        Inner3(dummy=42, holder=holder).save()
        change_url = reverse('admin:admin_inlines_holder3_change', args=(holder.id,))
        response = self.client.get(change_url)
        self.assertEqual(
            response.context['inline_admin_formsets'][0].media._js,
            [
                'admin/js/vendor/jquery/jquery.min.js',
                'my_awesome_inline_scripts.js',
                'custom_number.js',
                'admin/js/jquery.init.js',
                'admin/js/inlines.js',
            ]
        )
        self.assertContains(response, 'my_awesome_inline_scripts.js')
    def test_all_inline_media(self):
        """Media from both the parent ModelAdmin and the inline is rendered."""
        holder = Holder2(dummy=13)
        holder.save()
        Inner2(dummy=42, holder=holder).save()
        change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
        response = self.client.get(change_url)
        self.assertContains(response, 'my_awesome_admin_scripts.js')
        self.assertContains(response, 'my_awesome_inline_scripts.js')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineAdminForm(TestCase):
    def test_immutable_content_type(self):
        """
        Regression test for #9362: constructing an InlineAdminForm must not
        alter the content_type attribute of its "original" instance. Only the
        "original" argument matters here, so the remaining constructor
        arguments can safely be None/{}.
        """
        teacher = Teacher.objects.create(name='Sally')
        parent = Parent.objects.create(name='John')
        child = Child.objects.create(name='Joe', teacher=teacher, parent=parent)
        inline_form = InlineAdminForm(None, None, {}, {}, child)
        expected_ct = ContentType.objects.get_for_model(Parent)
        self.assertEqual(inline_form.original.content_type, expected_ct)
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineProtectedOnDelete(TestDataMixin, TestCase):
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_deleting_inline_with_protected_delete_does_not_validate(self):
        """
        Deleting an inline object with on_delete=PROTECT related objects is
        rejected with a validation message rather than raising.
        """
        novel = Novel.objects.create(name='Lord of the rings')
        chapter = Chapter.objects.create(novel=novel, name='Many Meetings')
        note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
        change_url = reverse('admin:admin_inlines_novel_change', args=(novel.id,))
        self.client.get(change_url)
        post_data = {
            'name': novel.name,
            'chapter_set-TOTAL_FORMS': 1,
            'chapter_set-INITIAL_FORMS': 1,
            'chapter_set-MAX_NUM_FORMS': 1000,
            '_save': 'Save',
            'chapter_set-0-id': chapter.id,
            'chapter_set-0-name': chapter.name,
            'chapter_set-0-novel': novel.id,
            'chapter_set-0-DELETE': 'on',
        }
        resp = self.client.post(change_url, post_data)
        self.assertContains(resp, "Deleting chapter %s would require deleting "
                            "the following protected related objects: foot note %s"
                            % (chapter, note))
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlinePermissions(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
    @classmethod
    def setUpTestData(cls):
        """Create a staff user with add/change permission on the parent
        models only; inline (Book/Inner2) permissions are added per test."""
        cls.user = User(username='admin', is_staff=True, is_active=True)
        cls.user.set_password('secret')
        cls.user.save()
        cls.author_ct = ContentType.objects.get_for_model(Author)
        cls.holder_ct = ContentType.objects.get_for_model(Holder2)
        cls.book_ct = ContentType.objects.get_for_model(Book)
        cls.inner_ct = ContentType.objects.get_for_model(Inner2)
        # User always has permissions to add and change Authors, and Holders,
        # the main (parent) models of the inlines. Permissions on the inlines
        # vary per test.
        permission = Permission.objects.get(codename='add_author', content_type=cls.author_ct)
        cls.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_author', content_type=cls.author_ct)
        cls.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='add_holder2', content_type=cls.holder_ct)
        cls.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_holder2', content_type=cls.holder_ct)
        cls.user.user_permissions.add(permission)
        author = Author.objects.create(pk=1, name='The Author')
        cls.book = author.books.create(name='The inline Book')
        cls.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,))
        # Get the ID of the automatically created intermediate model for the Author-Book m2m
        author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=cls.book)
        cls.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
        cls.holder = Holder2.objects.create(dummy=13)
        cls.inner2 = Inner2.objects.create(dummy=42, holder=cls.holder)
def setUp(self):
self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(self.holder.id,))
self.client.force_login(self.user)
def test_inline_add_m2m_noperm(self):
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
    def test_inline_add_m2m_view_only_perm(self):
        # View permission alone renders a read-only inline with zero forms on
        # the add page.
        permission = Permission.objects.get(codename='view_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # View-only inlines. (It could be nicer to hide the empty, non-editable
        # inlines on the add page.)
        self.assertIs(response.context['inline_admin_formset'].has_view_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_add_permission, False)
        self.assertIs(response.context['inline_admin_formset'].has_change_permission, False)
        self.assertIs(response.context['inline_admin_formset'].has_delete_permission, False)
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(
            response,
            '<input type="hidden" name="Author_books-TOTAL_FORMS" value="0" '
            'id="id_Author_books-TOTAL_FORMS">',
            html=True,
        )
        self.assertNotContains(response, 'Add another Author-Book Relationship')
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# No change permission on Books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
# Add permission on inner2s, so we get the inline
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS">', html=True)
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_view_only_perm(self):
permission = Permission.objects.get(codename='view_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# View-only inlines.
self.assertIs(response.context['inline_admin_formset'].has_view_permission, True)
self.assertIs(response.context['inline_admin_formset'].has_add_permission, False)
self.assertIs(response.context['inline_admin_formset'].has_change_permission, False)
self.assertIs(response.context['inline_admin_formset'].has_delete_permission, False)
self.assertContains(response, '<h2>Author-book relationships</h2>')
self.assertContains(
response,
'<input type="hidden" name="Author_books-TOTAL_FORMS" value="1" '
'id="id_Author_books-TOTAL_FORMS">',
html=True,
)
# The field in the inline is read-only.
self.assertContains(response, '<p>%s</p>' % self.book)
self.assertNotContains(
response,
'<input type="checkbox" name="Author_books-0-DELETE" id="id_Author_books-0-DELETE">',
html=True,
)
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertIs(response.context['inline_admin_formset'].has_view_permission, True)
self.assertIs(response.context['inline_admin_formset'].has_add_permission, True)
self.assertIs(response.context['inline_admin_formset'].has_change_permission, True)
self.assertIs(response.context['inline_admin_formset'].has_delete_permission, True)
self.assertContains(response, '<h2>Author-book relationships</h2>')
self.assertContains(response, 'Add another Author-book relationship')
self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
'value="4" name="Author_books-TOTAL_FORMS">', html=True)
self.assertContains(
response,
'<input type="hidden" id="id_Author_books-0-id" value="%i" '
'name="Author_books-0-id">' % self.author_book_auto_m2m_intermediate_id,
html=True
)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
# 3 extra forms only, not the existing instance form
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
'name="inner2_set-TOTAL_FORMS">',
html=True
)
self.assertNotContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
html=True
)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add new
self.assertContains(response, '<h2>Inner2s</h2>', count=2)
# Just the one form for existing instances
self.assertContains(
response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS">',
html=True
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
html=True
)
# max-num 0 means we can't add new ones
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" name="inner2_set-MAX_NUM_FORMS">',
html=True
)
# TabularInline
self.assertContains(response, '<th class="column-dummy required">Dummy</th>', html=True)
self.assertContains(
response,
'<input type="number" name="inner2_set-2-0-dummy" value="%s" '
'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
html=True,
)
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance and three extra for new
self.assertContains(
response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS">',
html=True
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
html=True
)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, no new
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS">',
html=True
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
html=True
)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(response, '<h2>Inner2s</h2>', count=2)
# One form for existing instance only, three for new
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS">',
html=True
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
html=True
)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
# TabularInline
self.assertContains(response, '<th class="column-dummy required">Dummy</th>', html=True)
self.assertContains(
response,
'<input type="number" name="inner2_set-2-0-dummy" value="%s" '
'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
html=True,
)
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestReadOnlyChangeViewInlinePermissions(TestCase):
    """Change view behavior when the user has only the view permission on
    the parent model (Poll) but all permissions on the inline model
    (Question): the whole page, inlines included, must be read-only.
    """
    @classmethod
    def setUpTestData(cls):
        # Staff user with view-only access on Poll and every Question perm.
        cls.user = User.objects.create_user('testing', password='password', is_staff=True)
        cls.user.user_permissions.add(
            Permission.objects.get(codename='view_poll', content_type=ContentType.objects.get_for_model(Poll))
        )
        cls.user.user_permissions.add(
            *Permission.objects.filter(
                codename__endswith="question", content_type=ContentType.objects.get_for_model(Question)
            ).values_list('pk', flat=True)
        )
        cls.poll = Poll.objects.create(name="Survey")
        cls.add_url = reverse('admin:admin_inlines_poll_add')
        cls.change_url = reverse('admin:admin_inlines_poll_change', args=(cls.poll.id,))
    def setUp(self):
        self.client.force_login(self.user)
    def test_add_url_not_allowed(self):
        # Without the add permission, both GET and POST are forbidden.
        response = self.client.get(self.add_url)
        self.assertEqual(response.status_code, 403)
        response = self.client.post(self.add_url, {})
        self.assertEqual(response.status_code, 403)
    def test_post_to_change_url_not_allowed(self):
        # View permission allows reading the page, never saving it.
        response = self.client.post(self.change_url, {})
        self.assertEqual(response.status_code, 403)
    def test_get_to_change_url_is_allowed(self):
        response = self.client.get(self.change_url)
        self.assertEqual(response.status_code, 200)
    def test_main_model_is_rendered_as_read_only(self):
        # The name is shown as plain text, not as an editable input.
        response = self.client.get(self.change_url)
        self.assertContains(
            response,
            '<div class="readonly">%s</div>' % self.poll.name,
            html=True
        )
        input = '<input type="text" name="name" value="%s" class="vTextField" maxlength="40" required id="id_name">'
        self.assertNotContains(
            response,
            input % self.poll.name,
            html=True
        )
    def test_inlines_are_rendered_as_read_only(self):
        # Despite full Question permissions, the parent's view-only status
        # forces the inline to render read-only as well.
        question = Question.objects.create(text="How will this be rendered?", poll=self.poll)
        response = self.client.get(self.change_url)
        self.assertContains(
            response,
            '<td class="field-text"><p>%s</p></td>' % question.text,
            html=True
        )
        self.assertNotContains(response, 'id="id_question_set-0-text"')
        self.assertNotContains(response, 'id="id_related_objs-0-DELETE"')
    def test_submit_line_shows_only_close_button(self):
        # No Save/Delete controls on a view-only change page — just Close.
        response = self.client.get(self.change_url)
        self.assertContains(
            response,
            '<a href="/admin/admin_inlines/poll/" class="closelink">Close</a>',
            html=True
        )
        delete_link = '<p class="deletelink-box"><a href="/admin/admin_inlines/poll/%s/delete/" class="deletelink">Delete</a></p>' # noqa
        self.assertNotContains(
            response,
            delete_link % self.poll.id,
            html=True
        )
        self.assertNotContains(response, '<input type="submit" value="Save and add another" name="_addanother">')
        self.assertNotContains(response, '<input type="submit" value="Save and continue editing" name="_continue">')
    def test_inline_delete_buttons_are_not_shown(self):
        Question.objects.create(text="How will this be rendered?", poll=self.poll)
        response = self.client.get(self.change_url)
        self.assertNotContains(
            response,
            '<input type="checkbox" name="question_set-0-DELETE" id="id_question_set-0-DELETE">',
            html=True
        )
    def test_extra_inlines_are_not_shown(self):
        # No empty extra forms are rendered in view-only mode.
        response = self.client.get(self.change_url)
        self.assertNotContains(response, 'id="id_question_set-0-text"')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestVerboseNameInlineForms(TestDataMixin, TestCase):
    """How inline headings ("<h2>…</h2>") and "Add another …" links are
    derived from ``verbose_name`` / ``verbose_name_plural`` set on the
    inline class vs. on the model's Meta.

    NOTE: several expected strings are intentionally awkward plurals
    (e.g. "Non-verbose childss") — the current behavior naively appends
    an "s" when only ``verbose_name`` is overridden. Don't "fix" them.
    """
    factory = RequestFactory()
    def test_verbose_name_inline(self):
        # Inlines override only verbose_name; the plural is derived from it.
        class NonVerboseProfileInline(TabularInline):
            model = Profile
            verbose_name = 'Non-verbose childs'
        class VerboseNameProfileInline(TabularInline):
            model = VerboseNameProfile
            verbose_name = 'Childs with verbose name'
        class VerboseNamePluralProfileInline(TabularInline):
            model = VerboseNamePluralProfile
            verbose_name = 'Childs with verbose name plural'
        class BothVerboseNameProfileInline(TabularInline):
            model = BothVerboseNameProfile
            verbose_name = 'Childs with both verbose names'
        modeladmin = ModelAdmin(ProfileCollection, admin_site)
        modeladmin.inlines = [
            NonVerboseProfileInline,
            VerboseNameProfileInline,
            VerboseNamePluralProfileInline,
            BothVerboseNameProfileInline,
        ]
        obj = ProfileCollection.objects.create()
        url = reverse('admin:admin_inlines_profilecollection_change', args=(obj.pk,))
        request = self.factory.get(url)
        request.user = self.superuser
        response = modeladmin.changeform_view(request)
        self.assertNotContains(response, 'Add another Profile')
        # Non-verbose model.
        self.assertContains(response, '<h2>Non-verbose childss</h2>')
        self.assertContains(response, 'Add another Non-verbose child')
        self.assertNotContains(response, '<h2>Profiles</h2>')
        # Model with verbose name.
        self.assertContains(response, '<h2>Childs with verbose names</h2>')
        self.assertContains(response, 'Add another Childs with verbose name')
        self.assertNotContains(response, '<h2>Model with verbose name onlys</h2>')
        self.assertNotContains(response, 'Add another Model with verbose name only')
        # Model with verbose name plural.
        self.assertContains(response, '<h2>Childs with verbose name plurals</h2>')
        self.assertContains(response, 'Add another Childs with verbose name plural')
        self.assertNotContains(response, '<h2>Model with verbose name plural only</h2>')
        # Model with both verbose names.
        self.assertContains(response, '<h2>Childs with both verbose namess</h2>')
        self.assertContains(response, 'Add another Childs with both verbose names')
        self.assertNotContains(response, '<h2>Model with both - plural name</h2>')
        self.assertNotContains(response, 'Add another Model with both - name')
    def test_verbose_name_plural_inline(self):
        # Inlines override only verbose_name_plural; the singular used for
        # "Add another …" falls back to the model's verbose_name.
        class NonVerboseProfileInline(TabularInline):
            model = Profile
            verbose_name_plural = 'Non-verbose childs'
        class VerboseNameProfileInline(TabularInline):
            model = VerboseNameProfile
            verbose_name_plural = 'Childs with verbose name'
        class VerboseNamePluralProfileInline(TabularInline):
            model = VerboseNamePluralProfile
            verbose_name_plural = 'Childs with verbose name plural'
        class BothVerboseNameProfileInline(TabularInline):
            model = BothVerboseNameProfile
            verbose_name_plural = 'Childs with both verbose names'
        modeladmin = ModelAdmin(ProfileCollection, admin_site)
        modeladmin.inlines = [
            NonVerboseProfileInline,
            VerboseNameProfileInline,
            VerboseNamePluralProfileInline,
            BothVerboseNameProfileInline,
        ]
        obj = ProfileCollection.objects.create()
        url = reverse('admin:admin_inlines_profilecollection_change', args=(obj.pk,))
        request = self.factory.get(url)
        request.user = self.superuser
        response = modeladmin.changeform_view(request)
        # Non-verbose model.
        self.assertContains(response, '<h2>Non-verbose childs</h2>')
        self.assertContains(response, 'Add another Profile')
        self.assertNotContains(response, '<h2>Profiles</h2>')
        # Model with verbose name.
        self.assertContains(response, '<h2>Childs with verbose name</h2>')
        self.assertContains(response, 'Add another Model with verbose name only')
        self.assertNotContains(response, '<h2>Model with verbose name onlys</h2>')
        # Model with verbose name plural.
        self.assertContains(response, '<h2>Childs with verbose name plural</h2>')
        self.assertContains(response, 'Add another Profile')
        self.assertNotContains(response, '<h2>Model with verbose name plural only</h2>')
        # Model with both verbose names.
        self.assertContains(response, '<h2>Childs with both verbose names</h2>')
        self.assertContains(response, 'Add another Model with both - name')
        self.assertNotContains(response, '<h2>Model with both - plural name</h2>')
    def test_both_verbose_names_inline(self):
        # Inlines override both names; the model's own names must not leak.
        class NonVerboseProfileInline(TabularInline):
            model = Profile
            verbose_name = 'Non-verbose childs - name'
            verbose_name_plural = 'Non-verbose childs - plural name'
        class VerboseNameProfileInline(TabularInline):
            model = VerboseNameProfile
            verbose_name = 'Childs with verbose name - name'
            verbose_name_plural = 'Childs with verbose name - plural name'
        class VerboseNamePluralProfileInline(TabularInline):
            model = VerboseNamePluralProfile
            verbose_name = 'Childs with verbose name plural - name'
            verbose_name_plural = 'Childs with verbose name plural - plural name'
        class BothVerboseNameProfileInline(TabularInline):
            model = BothVerboseNameProfile
            verbose_name = 'Childs with both - name'
            verbose_name_plural = 'Childs with both - plural name'
        modeladmin = ModelAdmin(ProfileCollection, admin_site)
        modeladmin.inlines = [
            NonVerboseProfileInline,
            VerboseNameProfileInline,
            VerboseNamePluralProfileInline,
            BothVerboseNameProfileInline,
        ]
        obj = ProfileCollection.objects.create()
        url = reverse('admin:admin_inlines_profilecollection_change', args=(obj.pk,))
        request = self.factory.get(url)
        request.user = self.superuser
        response = modeladmin.changeform_view(request)
        self.assertNotContains(response, 'Add another Profile')
        # Non-verbose model.
        self.assertContains(response, '<h2>Non-verbose childs - plural name</h2>')
        self.assertContains(response, 'Add another Non-verbose childs - name')
        self.assertNotContains(response, '<h2>Profiles</h2>')
        # Model with verbose name.
        self.assertContains(response, '<h2>Childs with verbose name - plural name</h2>')
        self.assertContains(response, 'Add another Childs with verbose name - name')
        self.assertNotContains(response, '<h2>Model with verbose name onlys</h2>')
        # Model with verbose name plural.
        self.assertContains(
            response,
            '<h2>Childs with verbose name plural - plural name</h2>',
        )
        self.assertContains(
            response,
            'Add another Childs with verbose name plural - name',
        )
        self.assertNotContains(response, '<h2>Model with verbose name plural only</h2>')
        # Model with both verbose names.
        self.assertContains(response, '<h2>Childs with both - plural name</h2>')
        self.assertContains(response, 'Add another Childs with both - name')
        self.assertNotContains(response, '<h2>Model with both - plural name</h2>')
        self.assertNotContains(response, 'Add another Model with both - name')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ['admin_inlines'] + AdminSeleniumTestCase.available_apps
def setUp(self):
User.objects.create_superuser(username='super', password='secret', email='super@example.com')
def test_add_stackeds(self):
"""
The "Add another XXX" link correctly adds items to the stacked formset.
"""
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))
inline_id = '#inner4stacked_set-group'
def rows_length():
return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
self.assertEqual(rows_length(), 4)
def test_delete_stackeds(self):
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))
inline_id = '#inner4stacked_set-group'
def rows_length():
return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
add_button.click()
self.assertEqual(rows_length(), 5, msg="sanity check")
for delete_link in self.selenium.find_elements_by_css_selector('%s .inline-deletelink' % inline_id):
delete_link.click()
with self.disable_implicit_wait():
self.assertEqual(rows_length(), 0)
    def test_delete_invalid_stacked_inlines(self):
        """Deleting the stacked inline row that caused a duplicate-value
        validation error removes its errorlist and lets the form save."""
        from selenium.common.exceptions import NoSuchElementException
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))
        inline_id = '#inner4stacked_set-group'
        def rows_length():
            return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))
        self.assertEqual(rows_length(), 3)
        add_button = self.selenium.find_element_by_link_text(
            'Add another Inner4 stacked')
        add_button.click()
        add_button.click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('#id_inner4stacked_set-4-dummy')), 1)
        # Enter some data and click 'Save'.
        # Rows 2 and 4 both get '222' to trigger the duplicate-value error.
        self.selenium.find_element_by_name('dummy').send_keys('1')
        self.selenium.find_element_by_name('inner4stacked_set-0-dummy').send_keys('100')
        self.selenium.find_element_by_name('inner4stacked_set-1-dummy').send_keys('101')
        self.selenium.find_element_by_name('inner4stacked_set-2-dummy').send_keys('222')
        self.selenium.find_element_by_name('inner4stacked_set-3-dummy').send_keys('103')
        self.selenium.find_element_by_name('inner4stacked_set-4-dummy').send_keys('222')
        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.assertEqual(rows_length(), 5, msg="sanity check")
        errorlist = self.selenium.find_element_by_css_selector(
            '%s .dynamic-inner4stacked_set .errorlist li' % inline_id
        )
        self.assertEqual('Please correct the duplicate values below.', errorlist.text)
        # Removing one of the duplicate rows clears the error display.
        delete_link = self.selenium.find_element_by_css_selector('#inner4stacked_set-4 .inline-deletelink')
        delete_link.click()
        self.assertEqual(rows_length(), 4)
        with self.disable_implicit_wait(), self.assertRaises(NoSuchElementException):
            self.selenium.find_element_by_css_selector('%s .dynamic-inner4stacked_set .errorlist li' % inline_id)
        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        # The objects have been created in the database.
        self.assertEqual(Inner4Stacked.objects.all().count(), 4)
    def test_delete_invalid_tabular_inlines(self):
        """Tabular counterpart of test_delete_invalid_stacked_inlines:
        deleting the offending row removes the error and allows saving."""
        from selenium.common.exceptions import NoSuchElementException
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))
        inline_id = '#inner4tabular_set-group'
        def rows_length():
            return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4tabular_set' % inline_id))
        self.assertEqual(rows_length(), 3)
        add_button = self.selenium.find_element_by_link_text(
            'Add another Inner4 tabular')
        add_button.click()
        add_button.click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('#id_inner4tabular_set-4-dummy')), 1)
        # Enter some data and click 'Save'.
        # Rows 2 and 4 both get '222' to trigger the duplicate-value error.
        self.selenium.find_element_by_name('dummy').send_keys('1')
        self.selenium.find_element_by_name('inner4tabular_set-0-dummy').send_keys('100')
        self.selenium.find_element_by_name('inner4tabular_set-1-dummy').send_keys('101')
        self.selenium.find_element_by_name('inner4tabular_set-2-dummy').send_keys('222')
        self.selenium.find_element_by_name('inner4tabular_set-3-dummy').send_keys('103')
        self.selenium.find_element_by_name('inner4tabular_set-4-dummy').send_keys('222')
        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.assertEqual(rows_length(), 5, msg="sanity check")
        # Non-field errorlist is in its own <tr> just before
        # tr#inner4tabular_set-3:
        errorlist = self.selenium.find_element_by_css_selector(
            '%s #inner4tabular_set-3 + .row-form-errors .errorlist li' % inline_id
        )
        self.assertEqual('Please correct the duplicate values below.', errorlist.text)
        delete_link = self.selenium.find_element_by_css_selector('#inner4tabular_set-4 .inline-deletelink')
        delete_link.click()
        self.assertEqual(rows_length(), 4)
        with self.disable_implicit_wait(), self.assertRaises(NoSuchElementException):
            self.selenium.find_element_by_css_selector('%s .dynamic-inner4tabular_set .errorlist li' % inline_id)
        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        # The objects have been created in the database.
        self.assertEqual(Inner4Tabular.objects.all().count(), 4)
    def test_add_inlines(self):
        """
        The "Add another XXX" link correctly adds items to the inline form.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))
        # There's only one inline to start with and it has the correct ID.
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')), 1)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[0].get_attribute('id'),
            'profile_set-0')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)
        # Add an inline
        self.selenium.find_element_by_link_text('Add another Profile').click()
        # The inline has been added, it has the right id, and it contains the
        # correct fields.
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 2)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)
        # Let's add another one to be sure
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 3)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)
        # Enter some data and click 'Save'
        self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
        self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
        self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
        self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
        self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
        self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        # The objects have been created in the database
        self.assertEqual(ProfileCollection.objects.all().count(), 1)
        self.assertEqual(Profile.objects.all().count(), 3)
def test_add_inline_link_absent_for_view_only_parent_model(self):
from selenium.common.exceptions import NoSuchElementException
user = User.objects.create_user('testing', password='password', is_staff=True)
user.user_permissions.add(
Permission.objects.get(codename='view_poll', content_type=ContentType.objects.get_for_model(Poll))
)
user.user_permissions.add(
*Permission.objects.filter(
codename__endswith="question", content_type=ContentType.objects.get_for_model(Question)
).values_list('pk', flat=True)
)
self.admin_login(username='testing', password='password')
poll = Poll.objects.create(name="Survey")
change_url = reverse('admin:admin_inlines_poll_change', args=(poll.id,))
self.selenium.get(self.live_server_url + change_url)
with self.disable_implicit_wait():
with self.assertRaises(NoSuchElementException):
self.selenium.find_element_by_link_text('Add another Question')
    def test_delete_inlines(self):
        """Deleting dynamically added inline rows removes them and
        re-sequences the remaining rows' IDs."""
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))
        # Add a few inlines
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '#profile_set-group table tr.dynamic-profile_set')), 5)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)
        # Click on a few delete buttons
        # Note: after the first delete the rows are renumbered, so the second
        # click on "#profile_set-2" targets what was originally row 3.
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()
        # The rows are gone and the IDs have been re-sequenced
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '#profile_set-group table tr.dynamic-profile_set')), 3)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
def test_collapsed_inlines(self):
# Collapsed inlines have SHOW/HIDE links.
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_author_add'))
# One field is in a stacked inline, other in a tabular one.
test_fields = ['#id_nonautopkbook_set-0-title', '#id_nonautopkbook_set-2-0-title']
show_links = self.selenium.find_elements_by_link_text('SHOW')
self.assertEqual(len(show_links), 3)
for show_index, field_name in enumerate(test_fields, 0):
self.wait_until_invisible(field_name)
show_links[show_index].click()
self.wait_until_visible(field_name)
hide_links = self.selenium.find_elements_by_link_text('HIDE')
self.assertEqual(len(hide_links), 2)
for hide_index, field_name in enumerate(test_fields, 0):
self.wait_until_visible(field_name)
hide_links[hide_index].click()
self.wait_until_invisible(field_name)
def test_added_stacked_inline_with_collapsed_fields(self):
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_teacher_add'))
self.selenium.find_element_by_link_text('Add another Child').click()
test_fields = ['#id_child_set-0-name', '#id_child_set-1-name']
show_links = self.selenium.find_elements_by_link_text('SHOW')
self.assertEqual(len(show_links), 2)
for show_index, field_name in enumerate(test_fields, 0):
self.wait_until_invisible(field_name)
show_links[show_index].click()
self.wait_until_visible(field_name)
hide_links = self.selenium.find_elements_by_link_text('HIDE')
self.assertEqual(len(hide_links), 2)
for hide_index, field_name in enumerate(test_fields, 0):
self.wait_until_visible(field_name)
hide_links[hide_index].click()
self.wait_until_invisible(field_name)
def assertBorder(self, element, border):
width, style, color = border.split(' ')
border_properties = [
'border-bottom-%s',
'border-left-%s',
'border-right-%s',
'border-top-%s',
]
for prop in border_properties:
prop = prop % 'width'
self.assertEqual(element.value_of_css_property(prop), width)
for prop in border_properties:
prop = prop % 'style'
self.assertEqual(element.value_of_css_property(prop), style)
# Convert hex color to rgb.
self.assertRegex(color, '#[0-9a-f]{6}')
r, g, b = int(color[1:3], 16), int(color[3:5], 16), int(color[5:], 16)
# The value may be expressed as either rgb() or rgba() depending on the
# browser.
colors = [
'rgb(%d, %d, %d)' % (r, g, b),
'rgba(%d, %d, %d, 1)' % (r, g, b),
]
for prop in border_properties:
prop = prop % 'color'
self.assertIn(element.value_of_css_property(prop), colors)
    def test_inline_formset_error_input_border(self):
        """Inline inputs get a red border after a save with validation errors.

        Before saving, every inline input has the default gray border; after
        an invalid save, inputs inside error-marked containers must have the
        red error border.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder5_add'))
        self.wait_until_visible('#id_dummy')
        self.selenium.find_element_by_id('id_dummy').send_keys(1)
        fields = ['id_inner5stacked_set-0-dummy', 'id_inner5tabular_set-0-dummy']
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        # Expand each inline and fill only the dummy field, leaving the
        # remaining required fields empty to provoke validation errors.
        for show_index, field_name in enumerate(fields):
            show_links[show_index].click()
            self.wait_until_visible('#' + field_name)
            self.selenium.find_element_by_id(field_name).send_keys(1)
        # Before save all inputs have default border
        for inline in ('stacked', 'tabular'):
            for field_name in ('name', 'select', 'text'):
                element_id = 'id_inner5%s_set-0-%s' % (inline, field_name)
                self.assertBorder(
                    self.selenium.find_element_by_id(element_id),
                    '1px solid #cccccc',
                )
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        # Test the red border around inputs by css selectors
        stacked_selectors = ['.errors input', '.errors select', '.errors textarea']
        for selector in stacked_selectors:
            self.assertBorder(
                self.selenium.find_element_by_css_selector(selector),
                '1px solid #ba2121',
            )
        # Tabular inlines render the error list as a sibling of the input.
        tabular_selectors = [
            'td ul.errorlist + input', 'td ul.errorlist + select', 'td ul.errorlist + textarea'
        ]
        for selector in tabular_selectors:
            self.assertBorder(
                self.selenium.find_element_by_css_selector(selector),
                '1px solid #ba2121',
            )
    def test_inline_formset_error(self):
        """Collapsible inlines with validation errors are rendered expanded.

        Inlines without errors stay collapsed after a save; once an inline
        formset has errors, it must come back expanded (not .collapsed) so
        the errors are visible.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder5_add'))
        stacked_inline_formset_selector = 'div#inner5stacked_set-group fieldset.module.collapse'
        tabular_inline_formset_selector = 'div#inner5tabular_set-group fieldset.module.collapse'
        # Inlines without errors, both inlines collapsed
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(stacked_inline_formset_selector + '.collapsed')), 1
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(tabular_inline_formset_selector + '.collapsed')), 1
        )
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        self.assertEqual(len(show_links), 2)
        # Inlines with errors, both inlines expanded
        test_fields = ['#id_inner5stacked_set-0-dummy', '#id_inner5tabular_set-0-dummy']
        # Fill only the dummy fields so the other required fields fail
        # validation on the next save.
        for show_index, field_name in enumerate(test_fields):
            show_links[show_index].click()
            self.wait_until_visible(field_name)
            self.selenium.find_element_by_id(field_name[1:]).send_keys(1)
        hide_links = self.selenium.find_elements_by_link_text('HIDE')
        self.assertEqual(len(hide_links), 2)
        for hide_index, field_name in enumerate(test_fields):
            hide_link = hide_links[hide_index]
            # Scroll the link into view before clicking to avoid misses.
            self.selenium.execute_script('window.scrollTo(0, %s);' % hide_link.location['y'])
            hide_link.click()
            self.wait_until_invisible(field_name)
        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        with self.disable_implicit_wait():
            self.assertEqual(
                len(self.selenium.find_elements_by_css_selector(stacked_inline_formset_selector + '.collapsed')), 0
            )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(tabular_inline_formset_selector + '.collapsed')), 0
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(stacked_inline_formset_selector)), 1
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(tabular_inline_formset_selector)), 1
        )
    def test_inlines_verbose_name(self):
        """
        The item added by the "Add another XXX" link must use the correct
        verbose_name in the inline form.
        """
        self.admin_login(username='super', password='secret')
        # Hide sidebar.
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_course_add'))
        toggle_button = self.selenium.find_element_by_css_selector('#toggle-nav-sidebar')
        toggle_button.click()
        # Each combination of horizontal/vertical filter with stacked/tabular
        # inlines.
        tests = [
            'admin:admin_inlines_course_add',
            'admin:admin_inlines_courseproxy_add',
            'admin:admin_inlines_courseproxy1_add',
            'admin:admin_inlines_courseproxy2_add',
        ]
        # %s is filled with the zero-based index of the inline form.
        css_selector = '.dynamic-class_set#class_set-%s h2'
        for url_name in tests:
            with self.subTest(url=url_name):
                self.selenium.get(self.live_server_url + reverse(url_name))
                # First inline shows the verbose_name.
                available, chosen = self.selenium.find_elements_by_css_selector(css_selector % 0)
                self.assertEqual(available.text, 'AVAILABLE ATTENDANT')
                self.assertEqual(chosen.text, 'CHOSEN ATTENDANT')
                # Added inline should also have the correct verbose_name.
                self.selenium.find_element_by_link_text('Add another Class').click()
                available, chosen = self.selenium.find_elements_by_css_selector(css_selector % 1)
                self.assertEqual(available.text, 'AVAILABLE ATTENDANT')
                self.assertEqual(chosen.text, 'CHOSEN ATTENDANT')
                # Third inline should also have the correct verbose_name.
                self.selenium.find_element_by_link_text('Add another Class').click()
                available, chosen = self.selenium.find_elements_by_css_selector(css_selector % 2)
                self.assertEqual(available.text, 'AVAILABLE ATTENDANT')
                self.assertEqual(chosen.text, 'CHOSEN ATTENDANT')
| 48.672353 | 138 | 0.665386 |
6efd785a91dc88d9f32b840eb5a25332fa6c8e97 | 3,873 | py | Python | src/createChimeVCResources/createChimeVCResources.py | aws-samples/amazon-chime-sma-call-forwarding | b5a042a0dc188110c7cd75bb9b2665cd3d26130f | [
"MIT-0"
] | 1 | 2021-12-18T00:12:17.000Z | 2021-12-18T00:12:17.000Z | src/createChimeVCResources/createChimeVCResources.py | aws-samples/amazon-chime-sma-call-forwarding | b5a042a0dc188110c7cd75bb9b2665cd3d26130f | [
"MIT-0"
] | null | null | null | src/createChimeVCResources/createChimeVCResources.py | aws-samples/amazon-chime-sma-call-forwarding | b5a042a0dc188110c7cd75bb9b2665cd3d26130f | [
"MIT-0"
] | null | null | null | import boto3
import time
import uuid
chime = boto3.client("chime")
def get_phone_number():
    """Order a new phone number from Amazon Chime and return it.

    Searches for one available number (currently restricted to the CA
    state), places a VoiceConnector phone number order, and polls the
    order until it completes.

    Returns:
        The ordered phone number as an E.164 string.

    Raises:
        Exception: if the order fails or does not complete within the
            polling budget.
    """
    print("Getting Phone Number")
    search_response = chime.search_available_phone_numbers(
        # AreaCode='string',
        # City='string',
        # Country='string',
        # TollFreePrefix='string',
        State="CA",
        MaxResults=1,
    )
    phone_number_to_order = search_response["E164PhoneNumbers"][0]
    print("Phone Number: {}".format(phone_number_to_order))
    phone_order = chime.create_phone_number_order(
        ProductType="VoiceConnector",
        E164PhoneNumbers=[
            phone_number_to_order,
        ],
    )
    print("Phone Order: {}".format(phone_order))
    order_id = phone_order["PhoneNumberOrder"]["PhoneNumberOrderId"]
    # Poll the order status until it succeeds, fails, or we give up
    # (5 polls at 5 second intervals, matching the original timeout).
    max_polls = 5
    check_phone_order = None
    for _ in range(max_polls + 1):
        check_phone_order = chime.get_phone_number_order(PhoneNumberOrderId=order_id)
        order_status = check_phone_order["PhoneNumberOrder"]["Status"]
        if order_status == "Successful":
            print("Phone Number Ordered: {}".format(phone_number_to_order))
            return phone_number_to_order
        if order_status == "Failed":
            raise Exception("Order number failed: {}".format(check_phone_order))
        print("Checking status: {}".format(order_status))
        time.sleep(5)
    # BUG FIX: previously an error *string* was returned here, which callers
    # (on_create -> associate_phone_number) would have treated as a valid
    # phone number. Raise instead, consistent with the "Failed" branch.
    raise Exception("Could not get phone number: {}".format(check_phone_order))
def create_voice_connector(region):
    """Create an unencrypted Amazon Chime Voice Connector in `region`.

    Returns a dict with the new connector's id and outbound host name:
    {"voiceConnectorId": ..., "outboundHostName": ...}
    """
    print("Creating Voice Connector")
    response = chime.create_voice_connector(
        Name="Trunk{}".format(uuid.uuid1()), AwsRegion=region, RequireEncryption=False
    )
    connector_details = response["VoiceConnector"]
    voice_connector = {
        "voiceConnectorId": connector_details["VoiceConnectorId"],
        "outboundHostName": connector_details["OutboundHostName"],
    }
    print("Voice Connector Created: {}".format(response))
    print("voiceConnector: {}".format(voice_connector))
    return voice_connector
def associate_phone_number(voice_connector, phoneNumber):
    """Associate `phoneNumber` with the given Voice Connector.

    Mutates `voice_connector` by recording the phone number under the
    "phoneNumber" key and returns the updated dict.
    """
    print("Associating Phone Number: {} with Voice Connector {}".format(phoneNumber, voice_connector))
    association = chime.associate_phone_numbers_with_voice_connector(
        VoiceConnectorId=voice_connector["voiceConnectorId"],
        E164PhoneNumbers=[phoneNumber],
        # Steal the number from any connector it is currently attached to.
        ForceAssociate=True,
    )
    print("Phone Number associated: {}".format(association))
    voice_connector["phoneNumber"] = phoneNumber
    return voice_connector
def on_event(event, context):
    """Entry point for CloudFormation custom resource events.

    Routes the event to the matching Create/Update/Delete handler and
    raises for any other request type.
    """
    print(event)
    request_type = event["RequestType"]
    if request_type not in ("Create", "Update", "Delete"):
        raise Exception("Invalid request type: %s" % request_type)
    if request_type == "Create":
        return on_create(event)
    return on_update(event) if request_type == "Update" else on_delete(event)
def on_create(event):
    """Handle a CloudFormation Create event.

    Orders a phone number, creates a Voice Connector in the requested
    region, and associates the number with the connector.
    """
    physical_id = "VoiceConnectorResources"
    region = event["ResourceProperties"]["region"]
    new_phone_number = get_phone_number()
    voice_connector = create_voice_connector(region)
    voice_connector = associate_phone_number(voice_connector, new_phone_number)
    print(str(voice_connector))
    # "Data" is surfaced to CloudFormation as the custom resource attributes.
    return {"PhysicalResourceId": physical_id, "Data": voice_connector}
def on_update(event):
    """Handle a CloudFormation Update event; resources are left unchanged."""
    resource_id = event["PhysicalResourceId"]
    print("Update resource %s with props %s" % (resource_id, event["ResourceProperties"]))
    return {"PhysicalResourceId": resource_id}
def on_delete(event):
    """Handle a CloudFormation Delete event; no cleanup is performed here."""
    resource_id = event["PhysicalResourceId"]
    print("delete resource %s" % resource_id)
    return {"PhysicalResourceId": resource_id}
| 34.891892 | 102 | 0.697909 |
913a27654eac5a0e684a9e89a6124dd16461b513 | 6,091 | py | Python | tests/test_utils.py | Maik93/NURBS-Python | 44d87a972c694371462378672429f2acc24249b2 | [
"MIT"
] | 382 | 2016-09-22T16:21:47.000Z | 2022-03-27T18:23:16.000Z | tests/test_utils.py | Maik93/NURBS-Python | 44d87a972c694371462378672429f2acc24249b2 | [
"MIT"
] | 143 | 2017-02-10T03:45:09.000Z | 2022-03-31T05:24:05.000Z | tests/test_utils.py | Maik93/NURBS-Python | 44d87a972c694371462378672429f2acc24249b2 | [
"MIT"
] | 123 | 2016-06-10T14:11:37.000Z | 2022-03-28T11:39:00.000Z | """
Tests for the NURBS-Python package
Released under The MIT License. See LICENSE file for details.
Copyright (c) 2018 Onur Rauf Bingol
Tests geomdl.utilities module. Requires "pytest" to run.
"""
import pytest
from geomdl import utilities
from geomdl import knotvector
from geomdl import control_points
from geomdl import utilities
from geomdl.exceptions import GeomdlException
GEOMDL_DELTA = 10e-6
def test_generate_knot_vector1():
    # A zero degree is rejected.
    with pytest.raises(ValueError):
        degree = 0
        num_ctrlpts = 12
        knotvector.generate(degree, num_ctrlpts)
def test_generate_knot_vector2():
    # A zero control point count is rejected.
    with pytest.raises(ValueError):
        degree = 4
        num_ctrlpts = 0
        knotvector.generate(degree, num_ctrlpts)
def test_generate_knot_vector3():
    # Both inputs being zero is rejected as well.
    with pytest.raises(ValueError):
        degree = 0
        num_ctrlpts = 0
        knotvector.generate(degree, num_ctrlpts)
def test_generate_knot_vector4():
    # Default generation produces a clamped, uniform knot vector.
    degree = 4
    num_ctrlpts = 12
    autogen_kv = knotvector.generate(degree, num_ctrlpts)
    result = [0.0, 0.0, 0.0, 0.0, 0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.0, 1.0, 1.0, 1.0]
    assert autogen_kv == result
def test_generate_knot_vector5():
    # testing auto-generated unclamped knot vector
    degree = 3
    num_ctrlpts = 5
    result = [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]
    autogen_kv = knotvector.generate(degree, num_ctrlpts, clamped=False)
    assert autogen_kv == result
def test_check_knot_vector1():
    # An empty knot vector is rejected outright.
    with pytest.raises(ValueError):
        knotvector.check(4, tuple(), 12)
def test_check_knot_vector2():
    # Too few knots for degree 4 / 12 control points -> invalid.
    to_check = knotvector.check(4, (1, 2, 3, 4), 12)
    result = False
    assert to_check == result
def test_check_knot_vector3():
    # Non-monotonic knot values -> invalid.
    to_check = knotvector.check(3, (5, 3, 6, 5, 4, 5, 6), 3)
    result = False
    assert to_check == result
def test_check_knot_vector4():
    # An auto-generated knot vector must always pass validation.
    degree = 4
    num_ctrlpts = 12
    autogen_kv = knotvector.generate(degree, num_ctrlpts)
    check_result = knotvector.check(degree=degree, num_ctrlpts=num_ctrlpts, knot_vector=autogen_kv)
    assert check_result
def test_check_knot_vector5():
    # A non-sequence knot vector raises TypeError.
    degree = 4
    num_ctrlpts = 12
    with pytest.raises(TypeError):
        knotvector.check(degree=degree, num_ctrlpts=num_ctrlpts, knot_vector=5)
def test_normalize_knot_vector1():
    # check for empty list/tuple
    with pytest.raises(ValueError):
        knotvector.normalize(tuple())
def test_normalize_knot_vector2():
    # Normalization maps the knot span onto [0, 1].
    input_kv = (-5, -5, -3, -2, 2, 3, 5, 5)
    output_kv = [0.0, 0.0, 0.2, 0.3, 0.7, 0.8, 1.0, 1.0]
    to_check = knotvector.normalize(input_kv)
    assert to_check == output_kv
def test_normalize_knot_vector3():
    # A non-sequence input raises TypeError.
    with pytest.raises(TypeError):
        knotvector.normalize(5)
def test_check_uv1():
    # A negative u parameter is outside the valid domain.
    u = -0.1
    v = 0.1
    assert not utilities.check_params([u, v])
def test_check_uv2():
    # A u parameter greater than 1 is outside the valid domain.
    u = 2
    v = 0.1
    assert not utilities.check_params([u, v])
def test_check_uv3():
    # A negative v parameter is outside the valid domain.
    v = -0.1
    u = 0.1
    assert not utilities.check_params([u, v])
def test_check_uv4():
    # A v parameter greater than 1 is outside the valid domain.
    v = 2
    u = 0.1
    assert not utilities.check_params([u, v])
def test_color_generator():
    # The generator must be deterministic for a fixed seed.
    seed = 17 # some number to be used as the random seed
    result = utilities.color_generator(seed)
    to_check = utilities.color_generator(seed)
    assert to_check == result
def test_cpman_curve1():
    """ Control Points Manager: get-set point (curve) """
    pt = [0.0, 0.2, 0.3]
    p = 3
    sz = 10
    cpman = control_points.CurveManager(sz)
    cpman.set_ctrlpt(pt, p)
    assert cpman.get_ctrlpt(3) == pt
def test_cpman_curve2():
    """ Control Points Manager: get empty point (curve) """
    p = 5
    sz = 12
    cpman = control_points.CurveManager(sz)
    assert cpman.get_ctrlpt(p) == []
def test_cpman_curve3():
    """ Control Points Manager: check for invalid index """
    p = 12
    sz = 5
    cpman = control_points.CurveManager(sz)
    # Out-of-range indices yield None; compare identity per PEP 8.
    assert cpman.get_ctrlpt(p) is None
def test_cpman_curve4():
    """ Control Points Manager: get-set attachment (valid, list) """
    d = [0.0, 1.0, 2.0, 3.0]
    p = 5
    sz = 12
    cpman = control_points.CurveManager(sz, testdata=4)
    cpman.set_ptdata(dict(testdata=d), p)
    retv1 = cpman.get_ptdata('testdata', p)
    retv2 = cpman.get_ptdata('testdata', p + 1)
    assert retv1[2] == 2.0
    assert retv2[2] == 0.0
def test_cpman_curve5():
    """ Control Points Manager: get-set attachment (invalid, list) """
    d = [0.0, 1.0, 2.0, 3.0]
    p = 5
    sz = 12
    cpman = control_points.CurveManager(sz, testdata=4)
    cpman.set_ptdata(dict(testdata=d), p)
    retv = cpman.get_ptdata('testdata2', p)
    # Unknown attachment keys yield None; compare identity per PEP 8.
    assert retv is None
def test_cpman_curve6():
    """ Control Points Manager: get-set attachment (exception) """
    with pytest.raises(GeomdlException):
        d = [0.0, 1.0, 2.0, 3.0]
        p = 5
        sz = 12
        cpman = control_points.CurveManager(sz, testdata=3)
        cpman.set_ptdata(dict(testdata=d), p)
def test_cpman_curve7():
    """ Control Points Manager: get-set attachment (valid, float) """
    d = 13
    p = 5
    sz = 12
    cpman = control_points.CurveManager(sz, testdata=1)
    cpman.set_ptdata(dict(testdata=d), p)
    assert cpman.get_ptdata('testdata', 5) == 13
def test_cpman_curve8():
    """ Control Points Manager: try to set invalid key """
    with pytest.raises(GeomdlException):
        d = [0.0, 1.0, 2.0, 3.0]
        p = 5
        sz = 12
        cpman = control_points.CurveManager(sz, testdata=4)
        cpman.set_ptdata({'testdata1': d}, p)
def test_cpman_surface1():
    """ Control Points Manager: get-set point (surface) """
    pt = [1.0, 2.0, 3.0]
    # (u, v) index of the control point to set.
    p = [2 ,3]
    # Size of the control point grid in each parametric direction.
    sz = [4, 3]
    cpman = control_points.SurfaceManager(*sz)
    cpman.set_ctrlpt(pt, *p)
    assert cpman.get_ctrlpt(2, 3) == pt
def test_cpman_volume1():
    """ Control Points Manager: get-set point (volume) """
    pt = [1.0, 2.0, 3.0]
    # (u, v, w) index of the control point to set.
    p = [2, 3, 1]
    # Size of the control point lattice in each parametric direction.
    sz = [4, 3, 2]
    cpman = control_points.VolumeManager(*sz)
    cpman.set_ctrlpt(pt, *p)
    assert cpman.get_ctrlpt(2, 3, 1) == pt
| 25.809322 | 108 | 0.646199 |
f0aeeb40162aaa4ca3c1c35865e5e854ae2a8d2f | 7,961 | py | Python | planarity_generation/boltzman_sampler/three_connected_graph.py | TeamNotJava/networkx-related | a045d8a0f66fa365775e4d77ae85e5a1ccb271eb | [
"BSD-3-Clause"
] | 2 | 2018-08-02T15:11:20.000Z | 2018-09-18T13:20:36.000Z | planarity_generation/boltzman_sampler/three_connected_graph.py | TeamNotJava/networkx-related | a045d8a0f66fa365775e4d77ae85e5a1ccb271eb | [
"BSD-3-Clause"
] | 3 | 2018-06-06T15:39:33.000Z | 2018-06-06T15:40:11.000Z | planarity_generation/boltzman_sampler/three_connected_graph.py | TeamNotJava/networkx-related | a045d8a0f66fa365775e4d77ae85e5a1ccb271eb | [
"BSD-3-Clause"
] | 1 | 2018-09-18T13:31:01.000Z | 2018-09-18T13:31:01.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2018 by
# Marta Grobelna <marta.grobelna@rwth-aachen.de>
# Petre Petrov <petrepp4@gmail.com>
# Rudi Floren <rudi.floren@gmail.com>
# Tobias Winkler <tobias.winkler1@rwth-aachen.de>
# All rights reserved.
# BSD license.
#
# Authors: Marta Grobelna <marta.grobelna@rwth-aachen.de>
# Petre Petrov <petrepp4@gmail.com>
# Rudi Floren <rudi.floren@gmail.com>
# Tobias Winkler <tobias.winkler1@rwth-aachen.de>
from .utils import bern_choice
from .binary_tree import BinaryTreeSampler
from .closure import Closure
class ThreeConnectedGraphSampler:
"""Sampler Class for 3-Connected Planar Graphs.
Uses the Binary Tree Sampler for sampling the 3-Connected Planar Graph.
"""
def set_probabilities(self, probabilities):
"""Sets the used probabilities
"""
self.probabilities = probabilities
def set_random(self, random_function):
"""Sets the used random function
"""
self.random_function = random_function
def __init__(self):
self.binary_tree_function = BinaryTreeSampler().binary_tree
self.binary_tree_sampler = BinaryTreeSampler().binary_tree_sampler
self.black_pointed_binary_tree_sampler = BinaryTreeSampler().black_pointed_binary_tree_sampler
self.dy_binary_tree_sampler = BinaryTreeSampler().dy_binary_tree_sampler
self.closure_function = Closure().closure
self.set_probabilities({
'ch_K_in_dyK': [0.1023148450168439782698851645651737890839, 0.8976851549831560217301148354348262109160],
'ch_dxK_in_dxyK': [0.01056720808167383324019751492093784146016, 0.9894327919183261667598024850790621585398],
'ch_b_or_dxb': [0.04311363388351982963897237807044101069163, 0.9568863661164801703610276219295589893083],
'ch_3b_or_dyb': [0.05196496216386519137788597254497784663637, 0.9480350378361348086221140274550221533636]
})
def three_connected_graph(self):
"""Sample a 3-Connected Planar Graph."""
return self.___three_connected_graph()
# Corresponds to
def ___three_connected_graph(self):
return self.draw_k()
def draw_k(self):
while True:
random = self.random_function()
#TODO check this with max size again!! WHY 4??
max_size = int(4 / random)
binary_tree = self.binary_tree_function(max_size)
if binary_tree is not None:
half_edge = self.closure_function(binary_tree)
if half_edge is not None:
return TreeConnectedGraph(half_edge)
def draw_dxK(self):
while True:
binary_tree = self.binary_tree_sampler() # no need of N
number_of_black_nodes = binary_tree.attr['num_black']
number_of_white_nodes = binary_tree.attr['num_white']
reject = (3.0 * (number_of_black_nodes + 1) / (2.0 * (number_of_black_nodes + number_of_white_nodes + 2)))
if reject >= self.random_function():
half_edge = self.closure_function(binary_tree)
if half_edge is not None:
return TreeConnectedGraph(half_edge)
def draw_dyK(self):
if bern_choice(self.probabilities['ch_K_in_dyK'], self.random_function) is 0:
return self.draw_k()
else:
while True:
binary_tree = self.binary_tree_sampler()
half_edge = self.closure_function(binary_tree)
if half_edge is not None:
return TreeConnectedGraph(half_edge)
def draw_dxyK(self):
if bern_choice(self.probabilities['ch_dxK_in_dxyK'], self.random_function) is 0:
return self.draw_dxK()
else:
while True:
if bern_choice(self.probabilities['ch_b_or_dxb'], self.random_function) is 0:
binary_tree = self.binary_tree_sampler()
else:
binary_tree = self.black_pointed_binary_tree_sampler()
half_edge = self.closure_function(binary_tree)
if half_edge is not None:
return TreeConnectedGraph(half_edge)
def draw_dxxK(self):
while True:
black_rooted_binary_tree = self.black_pointed_binary_tree_sampler()
number_of_black_nodes = black_rooted_binary_tree.attr['num_black']
number_of_white_nodes = black_rooted_binary_tree.attr['num_white']
reject = (3.0 * (number_of_black_nodes + 1) / (2.0 * (number_of_black_nodes + number_of_white_nodes + 2)))
if reject >= self.random_function():
half_edge = self.closure_function(black_rooted_binary_tree)
if half_edge is not None:
return TreeConnectedGraph(half_edge)
def draw_dyyK(self):
while True:
if bern_choice(self.probabilities['ch_3b_or_dyb'], self.random_function) is 0:
binary_tree = self.binary_tree_sampler()
else:
binary_tree = self.dy_binary_tree_sampler()
half_edge = self.closure_function(binary_tree)
if half_edge is not None:
return TreeConnectedGraph(half_edge)
class TreeConnectedGraph:
''' A Tree connected graph representation. '''
# Root half edge.
# It, together with its opposite one are not contained in the vertices list.
root_half_edge = None
#Both edges_list and vertices list contain objects from HalfEdge class.
# List of vertices in the 3 connected graph
vertices_list = list()
# List of edges in the 3 connected graph
edges_list = list()
def __init__(self, root_half_edge):
self.root_half_edge = root_half_edge
self.__dfs_three_connected_graph_components_extraction(root_half_edge, root_half_edge, set())
# Given the 3-map closure, this function extract the vertices and the edges from it.
# The root half edge and its opposite one are not part from the vertices set.
# The opposite property from the half edges in the edges_list gives the vertex in the 3 map for connection.
def __dfs_three_connected_graph_components_extraction(self, first_half_edge, root_half_edge, visited_half_edges):
# Check if the half edge has been already processed
if first_half_edge.index in visited_half_edges:
return
# Mark the first half edge and the ones with which it shares a vertex to visited.
visited_half_edges.update(first_half_edge.index)
walker_half_edge = first_half_edge.next
while walker_half_edge != first_half_edge:
visited_half_edges.update(first_half_edge.index)
walker_half_edge = walker_half_edge.next
# Check if the first half edge is different from the root and root.opposite. If this is true,
# than the first half edge is added to a vertices list.
walker_half_edge = first_half_edge
if walker_half_edge != root_half_edge and walker_half_edge.opposite != root_half_edge:
self.vertices_list.append(walker_half_edge)
# Insert half edges to the edges list.
walker_half_edge = walker_half_edge.next
while walker_half_edge != first_half_edge:
# The condition that one half edge should fullfill for inserting.
if walker_half_edge != root_half_edge and walker_half_edge.opposite.index not in visited_half_edges:
self.edges_list.append(walker_half_edge)
walker_half_edge = walker_half_edge.next
# Call the function recursively for the opposite half edges.
walker_half_edge = walker_half_edge.next
while walker_half_edge != first_half_edge:
self.__dfs_three_connected_graph_components_extraction(walker_half_edge.opposite, root_half_edge, visited_half_edges)
walker_half_edge = walker_half_edge.next | 45.232955 | 129 | 0.677302 |
0909ee76725a5394db56ae7d6b25d61013e7bc2b | 39,873 | py | Python | buildscripts/tests/test_update_test_lifecycle.py | puppyofkosh/mongo | ed601dd01169b8c1fad9fb8d388da0523a1b48f5 | [
"Apache-2.0"
] | 1 | 2015-11-08T17:16:08.000Z | 2015-11-08T17:16:08.000Z | buildscripts/tests/test_update_test_lifecycle.py | puppyofkosh/mongo | ed601dd01169b8c1fad9fb8d388da0523a1b48f5 | [
"Apache-2.0"
] | null | null | null | buildscripts/tests/test_update_test_lifecycle.py | puppyofkosh/mongo | ed601dd01169b8c1fad9fb8d388da0523a1b48f5 | [
"Apache-2.0"
] | 1 | 2021-06-18T05:00:06.000Z | 2021-06-18T05:00:06.000Z | """
Tests for buildscripts/update_test_lifecycle.py.
"""
from __future__ import absolute_import
import collections
import copy
import datetime
import unittest
from buildscripts import test_failures
from buildscripts import update_test_lifecycle
from buildscripts.ciconfig import tags as ci_tags
class TestValidateConfig(unittest.TestCase):
"""
Tests for the validate_config() function.
"""
CONFIG = update_test_lifecycle.Config(
test_fail_rates=update_test_lifecycle.Rates(acceptable=0, unacceptable=1),
task_fail_rates=update_test_lifecycle.Rates(acceptable=0, unacceptable=1),
variant_fail_rates=update_test_lifecycle.Rates(acceptable=0, unacceptable=1),
distro_fail_rates=update_test_lifecycle.Rates(acceptable=0, unacceptable=1),
reliable_min_runs=2,
reliable_time_period=datetime.timedelta(days=1),
unreliable_min_runs=2,
unreliable_time_period=datetime.timedelta(days=1))
def test_acceptable_test_fail_rate(self):
"""
Tests the validation of the 'test_fail_rates.acceptable' attribute.
"""
with self.assertRaises(TypeError):
config = self.CONFIG._replace(
test_fail_rates=self.CONFIG.test_fail_rates._replace(acceptable="not a number"))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
test_fail_rates=self.CONFIG.test_fail_rates._replace(acceptable=-1))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
test_fail_rates=self.CONFIG.test_fail_rates._replace(acceptable=2))
update_test_lifecycle.validate_config(config)
def test_unacceptable_test_fail_rate(self):
"""
Tests the validation of the 'test_fail_rates.unacceptable' attribute.
"""
with self.assertRaises(TypeError):
config = self.CONFIG._replace(
test_fail_rates=self.CONFIG.test_fail_rates._replace(unacceptable="not a number"))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
test_fail_rates=self.CONFIG.test_fail_rates._replace(unacceptable=-1))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
test_fail_rates=self.CONFIG.test_fail_rates._replace(unacceptable=2))
update_test_lifecycle.validate_config(config)
def test_test_fail_rates(self):
"""
Tests the validation of the 'test_fail_rates' attribute.
"""
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
test_fail_rates=self.CONFIG.test_fail_rates._replace(acceptable=0.9,
unacceptable=0.1))
update_test_lifecycle.validate_config(config)
def test_acceptable_task_fail_rate(self):
"""
Tests the validation of the 'test_fail_rates.acceptable' attribute.
"""
with self.assertRaises(TypeError):
config = self.CONFIG._replace(
task_fail_rates=self.CONFIG.task_fail_rates._replace(acceptable="not a number"))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
task_fail_rates=self.CONFIG.task_fail_rates._replace(acceptable=-1))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
task_fail_rates=self.CONFIG.task_fail_rates._replace(acceptable=2))
update_test_lifecycle.validate_config(config)
def test_unacceptable_task_fail_rate(self):
"""
Tests the validation of the 'task_fail_rates.unacceptable' attribute.
"""
with self.assertRaises(TypeError):
config = self.CONFIG._replace(
task_fail_rates=self.CONFIG.task_fail_rates._replace(unacceptable="not a number"))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
task_fail_rates=self.CONFIG.task_fail_rates._replace(unacceptable=-1))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
task_fail_rates=self.CONFIG.task_fail_rates._replace(unacceptable=2))
update_test_lifecycle.validate_config(config)
def test_task_fail_rates(self):
"""
Tests the validation of the 'task_fail_rates' attribute.
"""
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
task_fail_rates=self.CONFIG.task_fail_rates._replace(acceptable=0.9,
unacceptable=0.1))
update_test_lifecycle.validate_config(config)
def test_acceptable_variant_fail_rate(self):
"""
Tests the validation of the 'variant_fail_rates.acceptable' attribute.
"""
with self.assertRaises(TypeError):
config = self.CONFIG._replace(
variant_fail_rates=self.CONFIG.variant_fail_rates._replace(
acceptable="not a number"))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
variant_fail_rates=self.CONFIG.variant_fail_rates._replace(acceptable=-1))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
variant_fail_rates=self.CONFIG.variant_fail_rates._replace(acceptable=2))
update_test_lifecycle.validate_config(config)
def test_unacceptable_variant_fail_rate(self):
"""
Tests the validation of the 'variant_fail_rates.unacceptable' attribute.
"""
with self.assertRaises(TypeError):
config = self.CONFIG._replace(
variant_fail_rates=self.CONFIG.variant_fail_rates._replace(
unacceptable="not a number"))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
variant_fail_rates=self.CONFIG.variant_fail_rates._replace(unacceptable=-1))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
variant_fail_rates=self.CONFIG.variant_fail_rates._replace(unacceptable=2))
update_test_lifecycle.validate_config(config)
def test_variant_fail_rates(self):
"""
Tests the validation of the 'variant_fail_rates' attribute.
"""
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
variant_fail_rates=self.CONFIG.variant_fail_rates._replace(acceptable=0.9,
unacceptable=0.1))
update_test_lifecycle.validate_config(config)
def test_acceptable_distro_fail_rate(self):
"""
Tests the validation of the 'distro_fail_rates.acceptable' attribute.
"""
with self.assertRaises(TypeError):
config = self.CONFIG._replace(
distro_fail_rates=self.CONFIG.distro_fail_rates._replace(acceptable="not a number"))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
distro_fail_rates=self.CONFIG.distro_fail_rates._replace(acceptable=-1))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
distro_fail_rates=self.CONFIG.distro_fail_rates._replace(acceptable=2))
update_test_lifecycle.validate_config(config)
def test_unacceptable_distro_fail_rate(self):
"""
Tests the validation of the 'distro_fail_rates.unacceptable' attribute.
"""
with self.assertRaises(TypeError):
config = self.CONFIG._replace(
distro_fail_rates=self.CONFIG.distro_fail_rates._replace(
unacceptable="not a number"))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
distro_fail_rates=self.CONFIG.distro_fail_rates._replace(unacceptable=-1))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
distro_fail_rates=self.CONFIG.distro_fail_rates._replace(unacceptable=2))
update_test_lifecycle.validate_config(config)
def test_distro_fail_rates(self):
"""
Tests the validation of the 'distro_fail_rates' attribute.
"""
with self.assertRaises(ValueError):
config = self.CONFIG._replace(
distro_fail_rates=self.CONFIG.distro_fail_rates._replace(acceptable=0.9,
unacceptable=0.1))
update_test_lifecycle.validate_config(config)
def test_reliable_min_runs(self):
"""
Tests the validation of the 'reliable_min_runs' attribute.
"""
with self.assertRaises(TypeError):
config = self.CONFIG._replace(reliable_min_runs="not a number")
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(reliable_min_runs=-1)
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(reliable_min_runs=0)
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(reliable_min_runs=1.5)
update_test_lifecycle.validate_config(config)
def test_reliable_time_period(self):
"""
Tests the validation of the 'reliable_time_period' attribute.
"""
with self.assertRaises(TypeError):
config = self.CONFIG._replace(reliable_time_period="not a datetime.timedelta")
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(reliable_time_period=datetime.timedelta(days=-1))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(reliable_time_period=datetime.timedelta(days=0))
update_test_lifecycle.validate_config(config)
with self.assertRaises(ValueError):
config = self.CONFIG._replace(reliable_time_period=datetime.timedelta(days=1, hours=1))
update_test_lifecycle.validate_config(config)
def test_unreliable_min_runs(self):
    """
    Tests the validation of the 'unreliable_min_runs' attribute.
    """
    # Each config is built outside the assertRaises block so the assertion
    # can only be triggered by validate_config() itself.
    for exc_type, bad_value in ((TypeError, "not a number"),
                                (ValueError, -1),
                                (ValueError, 0),
                                (ValueError, 1.5)):
        config = self.CONFIG._replace(unreliable_min_runs=bad_value)
        with self.assertRaises(exc_type):
            update_test_lifecycle.validate_config(config)
def test_unreliable_time_period(self):
    """
    Tests the validation of the 'unreliable_time_period' attribute.
    """
    # Each config is built outside the assertRaises block so the assertion
    # can only be triggered by validate_config() itself.
    for exc_type, bad_value in ((TypeError, "not a datetime.timedelta"),
                                (ValueError, datetime.timedelta(days=-1)),
                                (ValueError, datetime.timedelta(days=0)),
                                (ValueError, datetime.timedelta(days=1, hours=1))):
        config = self.CONFIG._replace(unreliable_time_period=bad_value)
        with self.assertRaises(exc_type):
            update_test_lifecycle.validate_config(config)
class TestUpdateTags(unittest.TestCase):
    """
    Tests for the update_tags() function.
    """

    # Baseline config: every fail-rate threshold is (acceptable=0,
    # unacceptable=1) so no combination changes state unless a test
    # overrides a specific threshold via _replace().
    CONFIG = update_test_lifecycle.Config(
        test_fail_rates=update_test_lifecycle.Rates(acceptable=0, unacceptable=1),
        task_fail_rates=update_test_lifecycle.Rates(acceptable=0, unacceptable=1),
        variant_fail_rates=update_test_lifecycle.Rates(acceptable=0, unacceptable=1),
        distro_fail_rates=update_test_lifecycle.Rates(acceptable=0, unacceptable=1),
        reliable_min_runs=2,
        reliable_time_period=datetime.timedelta(days=1),
        unreliable_min_runs=2,
        unreliable_time_period=datetime.timedelta(days=1))

    # Template report entry; individual tests customize it with _replace().
    ENTRY = test_failures.ReportEntry(test="jstests/core/all.js",
                                      task="jsCore_WT",
                                      variant="linux-64",
                                      distro="rhel62",
                                      start_date=datetime.date(2017, 6, 3),
                                      end_date=datetime.date(2017, 6, 3),
                                      num_pass=0,
                                      num_fail=0)

    def assert_has_only_js_tests(self, lifecycle):
        """
        Raises an AssertionError exception if 'lifecycle' is not of the following form:

            selector:
              js_test:
                ...
        """
        self.assertIn("selector", lifecycle.raw)
        self.assertEqual(1, len(lifecycle.raw), msg=str(lifecycle.raw))
        self.assertIn("js_test", lifecycle.raw["selector"])
        self.assertEqual(1, len(lifecycle.raw["selector"]), msg=str(lifecycle.raw))
        return lifecycle.raw["selector"]["js_test"]

    def transition_from_reliable_to_unreliable(self, config, expected_tags):
        """
        Tests that update_tags() tags a formerly reliable combination as being unreliable.
        """
        initial_tags = collections.OrderedDict()
        lifecycle = ci_tags.TagsConfig.from_dict(
            dict(selector=dict(js_test=copy.deepcopy(initial_tags))))
        summary_lifecycle = update_test_lifecycle.TagsConfigWithChangelog(lifecycle)
        self.assertEqual(collections.OrderedDict(), self.assert_has_only_js_tests(lifecycle))
        # One failure for each (test, task, variant, distro) axis, plus one
        # pass so rates stay below 100%.
        report = test_failures.Report([
            self.ENTRY._replace(num_pass=0, num_fail=1),
            self.ENTRY._replace(num_pass=0, num_fail=1, task="jsCore"),
            self.ENTRY._replace(num_pass=0, num_fail=1, variant="linux-64-debug"),
            self.ENTRY._replace(num_pass=1, num_fail=0),
            self.ENTRY._replace(num_pass=0, num_fail=1, distro="rhel55"),
        ])
        update_test_lifecycle.validate_config(config)
        update_test_lifecycle.update_tags(summary_lifecycle, config, report)
        updated_tags = self.assert_has_only_js_tests(lifecycle)
        self.assertEqual(updated_tags, expected_tags)

    def test_transition_test_from_reliable_to_unreliable(self):
        """
        Tests that update_tags() tags a formerly reliable (test,) combination as being unreliable.
        """
        config = self.CONFIG._replace(
            test_fail_rates=self.CONFIG.test_fail_rates._replace(unacceptable=0.1))
        self.transition_from_reliable_to_unreliable(config, collections.OrderedDict([
            ("jstests/core/all.js", ["unreliable"]),
        ]))

    def test_transition_task_from_reliable_to_unreliable(self):
        """
        Tests that update_tags() tags a formerly reliable (test, task) combination as being
        unreliable.
        """
        config = self.CONFIG._replace(
            task_fail_rates=self.CONFIG.task_fail_rates._replace(unacceptable=0.1))
        self.transition_from_reliable_to_unreliable(config, collections.OrderedDict([
            ("jstests/core/all.js", ["unreliable|jsCore_WT"]),
        ]))

    def test_transition_variant_from_reliable_to_unreliable(self):
        """
        Tests that update_tags() tags a formerly reliable (test, task, variant) combination as being
        unreliable.
        """
        config = self.CONFIG._replace(
            variant_fail_rates=self.CONFIG.variant_fail_rates._replace(unacceptable=0.1))
        self.transition_from_reliable_to_unreliable(config, collections.OrderedDict([
            ("jstests/core/all.js", ["unreliable|jsCore_WT|linux-64"]),
        ]))

    def test_transition_distro_from_reliable_to_unreliable(self):
        """
        Tests that update_tags() tags a formerly reliable (test, task, variant, distro) combination
        as being unreliable.
        """
        config = self.CONFIG._replace(
            distro_fail_rates=self.CONFIG.distro_fail_rates._replace(unacceptable=0.1))
        self.transition_from_reliable_to_unreliable(config, collections.OrderedDict([
            ("jstests/core/all.js", ["unreliable|jsCore_WT|linux-64|rhel62"]),
        ]))

    def test_transition_from_reliable_to_unreliable(self):
        """
        Tests that update_tags() tags multiple formerly reliable combination as being unreliable.
        """
        config = self.CONFIG._replace(
            test_fail_rates=self.CONFIG.test_fail_rates._replace(unacceptable=0.1),
            task_fail_rates=self.CONFIG.task_fail_rates._replace(unacceptable=0.1),
            variant_fail_rates=self.CONFIG.variant_fail_rates._replace(unacceptable=0.1),
            distro_fail_rates=self.CONFIG.distro_fail_rates._replace(unacceptable=0.1))
        self.transition_from_reliable_to_unreliable(config, collections.OrderedDict([
            ("jstests/core/all.js", [
                "unreliable",
                "unreliable|jsCore_WT",
                "unreliable|jsCore_WT|linux-64",
                "unreliable|jsCore_WT|linux-64|rhel62",
            ]),
        ]))

    def transition_from_unreliable_to_reliable(self, config, initial_tags):
        """
        Tests that update_tags() untags a formerly unreliable combination after it has become
        reliable again.
        """
        lifecycle = ci_tags.TagsConfig.from_dict(
            dict(selector=dict(js_test=copy.deepcopy(initial_tags))))
        summary_lifecycle = update_test_lifecycle.TagsConfigWithChangelog(lifecycle)
        self.assertEqual(initial_tags, self.assert_has_only_js_tests(lifecycle))
        # Mirror image of the unreliable report: mostly passes, one failure.
        report = test_failures.Report([
            self.ENTRY._replace(num_pass=1, num_fail=0),
            self.ENTRY._replace(num_pass=1, num_fail=0, task="jsCore"),
            self.ENTRY._replace(num_pass=1, num_fail=0, variant="linux-64-debug"),
            self.ENTRY._replace(num_pass=0, num_fail=1),
            self.ENTRY._replace(num_pass=1, num_fail=0, distro="rhel55"),
        ])
        update_test_lifecycle.validate_config(config)
        update_test_lifecycle.update_tags(summary_lifecycle, config, report)
        updated_tags = self.assert_has_only_js_tests(lifecycle)
        self.assertEqual(updated_tags, collections.OrderedDict())

    def test_transition_test_from_unreliable_to_reliable(self):
        """
        Tests that update_tags() untags a formerly unreliable (test,) combination after it has
        become reliable again.
        """
        config = self.CONFIG._replace(
            test_fail_rates=self.CONFIG.test_fail_rates._replace(acceptable=0.9))
        self.transition_from_unreliable_to_reliable(config, collections.OrderedDict([
            ("jstests/core/all.js", ["unreliable"]),
        ]))

    def test_transition_task_from_unreliable_to_reliable(self):
        """
        Tests that update_tags() untags a formerly unreliable (test, task) combination after it has
        become reliable again.
        """
        config = self.CONFIG._replace(
            task_fail_rates=self.CONFIG.task_fail_rates._replace(acceptable=0.9))
        self.transition_from_unreliable_to_reliable(config, collections.OrderedDict([
            ("jstests/core/all.js", ["unreliable|jsCore_WT"]),
        ]))

    def test_transition_variant_from_unreliable_to_reliable(self):
        """
        Tests that update_tags() untags a formerly unreliable (test, task, variant) combination
        after it has become reliable again.
        """
        config = self.CONFIG._replace(
            variant_fail_rates=self.CONFIG.variant_fail_rates._replace(acceptable=0.9))
        self.transition_from_unreliable_to_reliable(config, collections.OrderedDict([
            ("jstests/core/all.js", ["unreliable|jsCore_WT|linux-64"]),
        ]))

    def test_transition_distro_from_unreliable_to_reliable(self):
        """
        Tests that update_tags() untags a formerly unreliable (test, task, variant, distro)
        combination after it has become reliable again.
        """
        config = self.CONFIG._replace(
            distro_fail_rates=self.CONFIG.distro_fail_rates._replace(acceptable=0.9))
        self.transition_from_unreliable_to_reliable(config, collections.OrderedDict([
            ("jstests/core/all.js", ["unreliable|jsCore_WT|linux-64|rhel62"]),
        ]))

    def test_transition_from_unreliable_to_reliable(self):
        """
        Tests that update_tags() untags multiple formerly unreliable combination after it has become
        reliable again.
        """
        config = self.CONFIG._replace(
            test_fail_rates=self.CONFIG.test_fail_rates._replace(acceptable=0.9),
            task_fail_rates=self.CONFIG.task_fail_rates._replace(acceptable=0.9),
            variant_fail_rates=self.CONFIG.variant_fail_rates._replace(acceptable=0.9),
            distro_fail_rates=self.CONFIG.distro_fail_rates._replace(acceptable=0.9))
        self.transition_from_unreliable_to_reliable(config, collections.OrderedDict([
            ("jstests/core/all.js", [
                "unreliable",
                "unreliable|jsCore_WT",
                "unreliable|jsCore_WT|linux-64",
                "unreliable|jsCore_WT|linux-64|rhel62",
            ]),
        ]))

    def test_remain_reliable(self):
        """
        Tests that update_tags() preserves the absence of tags for reliable combinations.
        """
        config = self.CONFIG._replace(
            test_fail_rates=self.CONFIG.test_fail_rates._replace(acceptable=0.9),
            task_fail_rates=self.CONFIG.task_fail_rates._replace(acceptable=0.9),
            variant_fail_rates=self.CONFIG.variant_fail_rates._replace(acceptable=0.9),
            distro_fail_rates=self.CONFIG.distro_fail_rates._replace(acceptable=0.9))
        initial_tags = collections.OrderedDict()
        lifecycle = ci_tags.TagsConfig.from_dict(
            dict(selector=dict(js_test=copy.deepcopy(initial_tags))))
        summary_lifecycle = update_test_lifecycle.TagsConfigWithChangelog(lifecycle)
        self.assertEqual(initial_tags, self.assert_has_only_js_tests(lifecycle))
        report = test_failures.Report([
            self.ENTRY._replace(num_pass=1, num_fail=0),
            self.ENTRY._replace(num_pass=1, num_fail=0, task="jsCore"),
            self.ENTRY._replace(num_pass=1, num_fail=0, variant="linux-64-debug"),
            self.ENTRY._replace(num_pass=0, num_fail=1),
            self.ENTRY._replace(num_pass=1, num_fail=0, distro="rhel55"),
        ])
        update_test_lifecycle.validate_config(config)
        update_test_lifecycle.update_tags(summary_lifecycle, config, report)
        updated_tags = self.assert_has_only_js_tests(lifecycle)
        self.assertEqual(updated_tags, initial_tags)

    def test_remain_unreliable(self):
        """
        Tests that update_tags() preserves the tags for unreliable combinations.
        """
        config = self.CONFIG._replace(
            test_fail_rates=self.CONFIG.test_fail_rates._replace(unacceptable=0.1),
            task_fail_rates=self.CONFIG.task_fail_rates._replace(unacceptable=0.1),
            variant_fail_rates=self.CONFIG.variant_fail_rates._replace(unacceptable=0.1),
            distro_fail_rates=self.CONFIG.distro_fail_rates._replace(unacceptable=0.1))
        initial_tags = collections.OrderedDict([
            ("jstests/core/all.js", [
                "unreliable",
                "unreliable|jsCore_WT",
                "unreliable|jsCore_WT|linux-64",
                "unreliable|jsCore_WT|linux-64|rhel62",
            ]),
        ])
        lifecycle = ci_tags.TagsConfig.from_dict(
            dict(selector=dict(js_test=copy.deepcopy(initial_tags))))
        summary_lifecycle = update_test_lifecycle.TagsConfigWithChangelog(lifecycle)
        self.assertEqual(initial_tags, self.assert_has_only_js_tests(lifecycle))
        report = test_failures.Report([
            self.ENTRY._replace(num_pass=0, num_fail=1),
            self.ENTRY._replace(num_pass=0, num_fail=1, task="jsCore"),
            self.ENTRY._replace(num_pass=0, num_fail=1, variant="linux-64-debug"),
            self.ENTRY._replace(num_pass=1, num_fail=0),
            self.ENTRY._replace(num_pass=0, num_fail=1, distro="rhel55"),
        ])
        update_test_lifecycle.validate_config(config)
        update_test_lifecycle.update_tags(summary_lifecycle, config, report)
        updated_tags = self.assert_has_only_js_tests(lifecycle)
        self.assertEqual(updated_tags, initial_tags)

    def test_obeys_reliable_min_runs(self):
        """
        Tests that update_tags() considers a test reliable if it has fewer than 'reliable_min_runs'.
        """
        config = self.CONFIG._replace(
            test_fail_rates=self.CONFIG.test_fail_rates._replace(acceptable=0.9),
            task_fail_rates=self.CONFIG.task_fail_rates._replace(acceptable=0.9),
            variant_fail_rates=self.CONFIG.variant_fail_rates._replace(acceptable=0.9),
            distro_fail_rates=self.CONFIG.distro_fail_rates._replace(acceptable=0.9),
            reliable_min_runs=100)
        self.transition_from_unreliable_to_reliable(config, collections.OrderedDict([
            ("jstests/core/all.js", [
                "unreliable",
                "unreliable|jsCore_WT",
                "unreliable|jsCore_WT|linux-64",
                "unreliable|jsCore_WT|linux-64|rhel62",
            ]),
        ]))

    def test_obeys_reliable_time_period(self):
        """
        Tests that update_tags() ignores passes from before 'reliable_time_period'.
        """
        config = self.CONFIG._replace(
            test_fail_rates=self.CONFIG.test_fail_rates._replace(acceptable=0.9),
            task_fail_rates=self.CONFIG.task_fail_rates._replace(acceptable=0.9),
            variant_fail_rates=self.CONFIG.variant_fail_rates._replace(acceptable=0.9),
            distro_fail_rates=self.CONFIG.distro_fail_rates._replace(acceptable=0.9))
        initial_tags = collections.OrderedDict()
        lifecycle = ci_tags.TagsConfig.from_dict(
            dict(selector=dict(js_test=copy.deepcopy(initial_tags))))
        summary_lifecycle = update_test_lifecycle.TagsConfigWithChangelog(lifecycle)
        self.assertEqual(initial_tags, self.assert_has_only_js_tests(lifecycle))
        # The passes are dated before the reliable_time_period window and must
        # be ignored; only the recent failures should count.
        report = test_failures.Report([
            self.ENTRY._replace(start_date=(self.ENTRY.start_date - datetime.timedelta(days=1)),
                                end_date=(self.ENTRY.end_date - datetime.timedelta(days=1)),
                                num_pass=1,
                                num_fail=0),
            self.ENTRY._replace(start_date=(self.ENTRY.start_date - datetime.timedelta(days=2)),
                                end_date=(self.ENTRY.end_date - datetime.timedelta(days=2)),
                                num_pass=1,
                                num_fail=0),
            self.ENTRY._replace(num_pass=0, num_fail=1),
            self.ENTRY._replace(num_pass=0, num_fail=1),
            self.ENTRY._replace(num_pass=0, num_fail=1, task="jsCore"),
            self.ENTRY._replace(num_pass=0, num_fail=1, variant="linux-64-debug"),
            self.ENTRY._replace(num_pass=0, num_fail=1, distro="rhel55"),
        ])
        update_test_lifecycle.validate_config(config)
        update_test_lifecycle.update_tags(summary_lifecycle, config, report)
        updated_tags = self.assert_has_only_js_tests(lifecycle)
        self.assertEqual(updated_tags, collections.OrderedDict([
            ("jstests/core/all.js", [
                "unreliable",
                "unreliable|jsCore_WT",
                "unreliable|jsCore_WT|linux-64",
                "unreliable|jsCore_WT|linux-64|rhel62",
            ]),
        ]))

    def test_obeys_unreliable_min_runs(self):
        """
        Tests that update_tags() only considers a test unreliable if it has more than
        'unreliable_min_runs'.
        """
        config = self.CONFIG._replace(
            test_fail_rates=self.CONFIG.test_fail_rates._replace(unacceptable=0.1),
            task_fail_rates=self.CONFIG.task_fail_rates._replace(unacceptable=0.1),
            variant_fail_rates=self.CONFIG.variant_fail_rates._replace(unacceptable=0.1),
            distro_fail_rates=self.CONFIG.distro_fail_rates._replace(unacceptable=0.1),
            unreliable_min_runs=100)
        initial_tags = collections.OrderedDict()
        lifecycle = ci_tags.TagsConfig.from_dict(
            dict(selector=dict(js_test=copy.deepcopy(initial_tags))))
        summary_lifecycle = update_test_lifecycle.TagsConfigWithChangelog(lifecycle)
        self.assertEqual(initial_tags, self.assert_has_only_js_tests(lifecycle))
        report = test_failures.Report([
            self.ENTRY._replace(num_pass=0, num_fail=1),
            self.ENTRY._replace(num_pass=0, num_fail=1, task="jsCore"),
            self.ENTRY._replace(num_pass=0, num_fail=1, variant="linux-64-debug"),
            self.ENTRY._replace(num_pass=1, num_fail=0),
            self.ENTRY._replace(num_pass=0, num_fail=1, distro="rhel55"),
        ])
        update_test_lifecycle.validate_config(config)
        update_test_lifecycle.update_tags(summary_lifecycle, config, report)
        updated_tags = self.assert_has_only_js_tests(lifecycle)
        self.assertEqual(updated_tags, initial_tags)

    def test_obeys_unreliable_time_period(self):
        """
        Tests that update_tags() ignores failures from before 'unreliable_time_period'.
        """
        config = self.CONFIG._replace(
            test_fail_rates=self.CONFIG.test_fail_rates._replace(unacceptable=0.1),
            task_fail_rates=self.CONFIG.task_fail_rates._replace(unacceptable=0.1),
            variant_fail_rates=self.CONFIG.variant_fail_rates._replace(unacceptable=0.1),
            distro_fail_rates=self.CONFIG.distro_fail_rates._replace(unacceptable=0.1))
        initial_tags = collections.OrderedDict([
            ("jstests/core/all.js", [
                "unreliable",
                "unreliable|jsCore_WT",
                "unreliable|jsCore_WT|linux-64",
                "unreliable|jsCore_WT|linux-64|rhel62",
            ]),
        ])
        lifecycle = ci_tags.TagsConfig.from_dict(
            dict(selector=dict(js_test=copy.deepcopy(initial_tags))))
        summary_lifecycle = update_test_lifecycle.TagsConfigWithChangelog(lifecycle)
        self.assertEqual(initial_tags, self.assert_has_only_js_tests(lifecycle))
        # The failures are dated before the unreliable_time_period window and
        # must be ignored; only the recent passes should count.
        report = test_failures.Report([
            self.ENTRY._replace(start_date=(self.ENTRY.start_date - datetime.timedelta(days=1)),
                                end_date=(self.ENTRY.end_date - datetime.timedelta(days=1)),
                                num_pass=0,
                                num_fail=1),
            self.ENTRY._replace(start_date=(self.ENTRY.start_date - datetime.timedelta(days=2)),
                                end_date=(self.ENTRY.end_date - datetime.timedelta(days=2)),
                                num_pass=0,
                                num_fail=1),
            self.ENTRY._replace(num_pass=1, num_fail=0),
            self.ENTRY._replace(num_pass=1, num_fail=0),
            self.ENTRY._replace(num_pass=1, num_fail=0, task="jsCore"),
            self.ENTRY._replace(num_pass=1, num_fail=0, variant="linux-64-debug"),
            self.ENTRY._replace(num_pass=1, num_fail=0, distro="rhel55"),
        ])
        update_test_lifecycle.validate_config(config)
        update_test_lifecycle.update_tags(summary_lifecycle, config, report)
        updated_tags = self.assert_has_only_js_tests(lifecycle)
        self.assertEqual(updated_tags, collections.OrderedDict())
class TestCleanUpTags(unittest.TestCase):
    """Tests for _is_tag_still_relevant() against a mocked Evergreen configuration."""

    @classmethod
    def setUpClass(cls):
        # Two variants: variant1 runs task1/task2 on distro1;
        # variant2 runs task3 on distro2.
        cls.evg = MockEvergreenConfig(["task1", "task2", "task3"],
                                      {"variant1": {"tasks": ["task1", "task2"],
                                                    "distros": ["distro1"]},
                                       "variant2": {"tasks": ["task3"],
                                                    "distros": ["distro2"]}})

    def test_is_unreliable_tag_relevant(self):
        """The bare 'unreliable' tag is always relevant."""
        self.assertTrue(update_test_lifecycle._is_tag_still_relevant(self.evg, "unreliable"))

    def test_is_unknown_task_relevant(self):
        """A tag naming a task absent from the project is irrelevant."""
        self.assertFalse(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task_unknown"))

    def test_is_known_task_relevant(self):
        """Tags naming any existing task remain relevant."""
        self.assertTrue(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task1"))
        self.assertTrue(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task2"))
        self.assertTrue(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task3"))

    def test_is_unknown_variant_relevant(self):
        """A tag naming a variant absent from the project is irrelevant."""
        self.assertFalse(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task1|variant3"
        ))

    def test_is_unknown_task_variant_relevant(self):
        """A tag pairing a task with a variant that does not run it is irrelevant."""
        self.assertFalse(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task3|variant1"))
        self.assertFalse(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task1|variant2"))

    def test_is_known_task_variant_relevant(self):
        """Tags pairing a task with a variant that runs it remain relevant."""
        self.assertTrue(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task1|variant1"))
        self.assertTrue(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task2|variant1"))
        self.assertTrue(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task3|variant2"))

    def test_is_unknown_task_variant_distro_relevant(self):
        """A tag naming a distro the variant does not use is irrelevant."""
        self.assertFalse(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task1|variant1|distro2"))
        self.assertFalse(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task3|variant2|distro1"))

    def test_is_known_task_variant_distro_relevant(self):
        """Tags whose distro matches the variant's distros remain relevant."""
        self.assertTrue(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task1|variant1|distro1"))
        self.assertTrue(update_test_lifecycle._is_tag_still_relevant(
            self.evg, "unreliable|task3|variant2|distro2"))
class MockEvergreenConfig(object):
    """Minimal stand-in for the Evergreen project configuration used in tests."""

    def __init__(self, tasks, variants):
        self.task_names = tasks
        # Map each variant name to a MockVariant built from its field dict.
        self.variants = {name: MockVariant(fields["tasks"], fields["distros"])
                         for name, fields in variants.items()}

    def get_variant(self, variant_name):
        """Return the MockVariant for variant_name, or None if unknown."""
        return self.variants.get(variant_name)
class MockVariant(object):
    """Minimal stand-in for an Evergreen build variant (tasks + distros)."""

    def __init__(self, task_names, distros):
        self.distros = distros
        self.task_names = task_names
class TestJiraIssueCreator(unittest.TestCase):
    """Tests for JiraIssueCreator's JIRA-markup description builders."""

    def test_description(self):
        """Updated-tags data renders as a nested JIRA bullet list with fail rates."""
        data = {"js_test": {"testfile1": {"tag1": 0.1, "tag2": 0.2},
                            "testfile2": {"tag1": 0.1, "tag3": 0.3}}}
        desc = update_test_lifecycle.JiraIssueCreator._make_updated_tags_description(data)
        # NOTE(review): the expected string relies on dict insertion order.
        expected = ("- *js_test*\n"
                    "-- {{testfile1}}\n"
                    "--- {{tag1}} (0.10 %)\n"
                    "--- {{tag2}} (0.20 %)\n"
                    "-- {{testfile2}}\n"
                    "--- {{tag1}} (0.10 %)\n"
                    "--- {{tag3}} (0.30 %)")
        self.assertEqual(expected, desc)

    def test_description_empty(self):
        """An empty data dict renders as the literal placeholder '_None_'."""
        data = {}
        desc = update_test_lifecycle.JiraIssueCreator._make_updated_tags_description(data)
        expected = "_None_"
        self.assertEqual(expected, desc)

    def test_clean_up_description(self):
        """Cleaned-up tags render per file; an empty tag list means the file is gone."""
        data = {"js_test": {"testfile1": ["tag1", "tag2"],
                            "testfile2": []}}
        desc = update_test_lifecycle.JiraIssueCreator._make_tags_cleaned_up_description(data)
        expected = ("- *js_test*\n"
                    "-- {{testfile1}}\n"
                    "--- {{tag1}}\n"
                    "--- {{tag2}}\n"
                    "-- {{testfile2}}\n"
                    "--- ALL (test file removed or renamed as part of an earlier commit)")
        self.assertEqual(expected, desc)

    def test_clean_up_description_empty(self):
        """An empty clean-up dict renders as the literal placeholder '_None_'."""
        data = {}
        desc = update_test_lifecycle.JiraIssueCreator._make_tags_cleaned_up_description(data)
        expected = "_None_"
        self.assertEqual(expected, desc)
class TestTagsConfigWithChangelog(unittest.TestCase):
    """Tests for TagsConfigWithChangelog's added/removed change tracking."""

    def setUp(self):
        # Fresh empty lifecycle wrapped in the changelog tracker per test.
        lifecycle = ci_tags.TagsConfig({"selector": {}})
        self.summary_lifecycle = update_test_lifecycle.TagsConfigWithChangelog(lifecycle)

    def test_add_tag(self):
        """Adding a tag records it (with its fail rate) under 'added'."""
        self.summary_lifecycle.add_tag("js_test", "testfile1", "tag1", 0.1)
        self.assertEqual({"js_test": {"testfile1": {"tag1": 0.1}}}, self.summary_lifecycle.added)

    def test_remove_tag(self):
        """Removing an existing tag records it (with its fail rate) under 'removed'."""
        self.summary_lifecycle.lifecycle.add_tag("js_test", "testfile1", "tag1")
        self.summary_lifecycle.remove_tag("js_test", "testfile1", "tag1", 0.1)
        self.assertEqual({"js_test": {"testfile1": {"tag1": 0.1}}}, self.summary_lifecycle.removed)

    def test_add_remove_tag(self):
        """Adding then removing the same tag cancels out of both change sets."""
        self.summary_lifecycle.add_tag("js_test", "testfile1", "tag1", 0.1)
        self.summary_lifecycle.remove_tag("js_test", "testfile1", "tag1", 0.4)
        self.assertEqual({}, self.summary_lifecycle.added)
        self.assertEqual({}, self.summary_lifecycle.removed)

    def test_remove_add_tag(self):
        """Removing then re-adding the same tag cancels out of both change sets."""
        self.summary_lifecycle.lifecycle.add_tag("js_test", "testfile1", "tag1")
        self.summary_lifecycle.remove_tag("js_test", "testfile1", "tag1", 0.1)
        self.summary_lifecycle.add_tag("js_test", "testfile1", "tag1", 0.1)
        self.assertEqual({}, self.summary_lifecycle.added)
        self.assertEqual({}, self.summary_lifecycle.removed)
| 43.912996 | 100 | 0.653299 |
11ff0e051cd21d48f5f490c258d486bda56b560e | 346 | py | Python | Intro/arrayMaxConsecutiveSum.py | sumeyaali/Code_Challenges | e130e9a2bd94f9928e92474e54736409aee69384 | [
"MIT"
] | null | null | null | Intro/arrayMaxConsecutiveSum.py | sumeyaali/Code_Challenges | e130e9a2bd94f9928e92474e54736409aee69384 | [
"MIT"
] | null | null | null | Intro/arrayMaxConsecutiveSum.py | sumeyaali/Code_Challenges | e130e9a2bd94f9928e92474e54736409aee69384 | [
"MIT"
] | null | null | null | def arrayMaxConsecutiveSum(array, k):
# iterate through the input Array
# k_sum is the sum of the first two numbers added together
k_sum = sum(array[0:k])
max_sum = k_sum
for i in range(k,len(array)):
k_sum = k_sum - array[i-k] + array[i]
if k_sum > max_sum:
max_sum = k_sum
return max_sum | 34.6 | 62 | 0.615607 |
f3679fe0c74415a09d3e43da9db47a0aeb731da5 | 19,943 | py | Python | plugins/tweettrans.py | chenxuan353/tweetToBot | 1b930c4eb287bd2e8d9ebc5bbe0ef45fcd221a32 | [
"MIT"
] | 15 | 2020-08-09T13:02:40.000Z | 2022-01-01T23:34:02.000Z | plugins/tweettrans.py | chenxuan353/tweetToBot | 1b930c4eb287bd2e8d9ebc5bbe0ef45fcd221a32 | [
"MIT"
] | 2 | 2020-08-10T04:43:16.000Z | 2020-09-22T02:10:52.000Z | plugins/tweettrans.py | chenxuan353/tweetToBot | 1b930c4eb287bd2e8d9ebc5bbe0ef45fcd221a32 | [
"MIT"
] | 5 | 2020-08-09T13:02:41.000Z | 2021-01-14T04:17:11.000Z | # -*- coding: UTF-8 -*-
from nonebot import on_command, CommandSession,NoticeSession,on_notice,permission as perm
from helper import getlogger,msgSendToBot,CQsessionToStr,TempMemory,argDeal,data_read,data_save
from module.twitter import decode_b64,encode_b64,mintweetID
from plugins.twitter import tweet_event_deal
from module.tweettrans import TweetTrans,rate_limit_bucket
import nonebot
import time
import asyncio
import os
import traceback
import re
import module.permissiongroup as permissiongroup
import config
logger = getlogger(__name__)
__plugin_name__ = '烤推'
__plugin_usage__ = r"""
烤推指令前端
"""
# Thread pool: image rendering is slow, so it runs off the event loop.
from concurrent.futures import ThreadPoolExecutor
pool = ThreadPoolExecutor(max_workers=64,thread_name_prefix="trans_Threads")
# On-disk history of finished translation jobs (max 300 entries).
trans_tmemory = TempMemory('trans_tmemory.json',limit=300,autoload=True,autosave=True)
# Permission group for this module.
permgroupname = 'transtweet'
permissiongroup.perm_addLegalPermGroup(__name__,'烤推模块',permgroupname)
permissiongroup.perm_addLegalPermUnit(permgroupname,'switch') # right to toggle translation authorization
permissiongroup.perm_addLegalPermUnit(permgroupname,'trans') # right to request translations
# Directory where rendered translation images are written.
trans_img_path = os.path.join(config.trans_img_path,'transtweet','transimg','')
transtemplate_filename = 'transtemplate.json'
transtemplate = {
    # default footer template; per-group overrides are keyed by group id
    '0':'<p dir="auto" style="color:#1DA1F2;font-size:0.7em;font-weight: 600;">翻译自日文</p>'
}
def loadTranstemplate():
    """Reload the template table from disk; return the raw data_read() result."""
    global transtemplate
    result = data_read(transtemplate_filename)
    if result[0]:
        # Successful read: element 2 carries the parsed template dict.
        transtemplate = result[2]
    return result
def transtemplateInit():
    """Load templates from disk; on first run persist the built-in defaults.

    (Removed the redundant ``global transtemplate`` declaration — this
    function never assigns the module global, it only passes it to
    data_save().)
    """
    res = loadTranstemplate()
    if not res[0]:
        # No saved template file yet -> write the defaults out.
        data_save(transtemplate_filename, transtemplate)
transtemplateInit()
def setTranstemplate(key,value):
    """Store one template entry and persist the whole table to disk."""
    transtemplate[key] = value
    data_save(transtemplate_filename, transtemplate)
def perm_check(session: CommandSession,permunit:str,Remotely:dict = None,user:bool = False):
    """Check one permission unit against this module's permission group.

    Remotely, when given, overrides the target context (expects
    'message_type' and 'sent_id' keys); user=True checks against the
    sender's private context instead of the session's own context.
    """
    if Remotely is not None:
        target_type = Remotely['message_type']
        target_id = Remotely['sent_id']
    elif user:
        target_type = 'private'
        target_id = session.event['user_id']
    elif session.event['message_type'] == 'group':
        target_type = 'group'
        target_id = session.event['group_id']
    else:
        target_type = session.event['message_type']
        target_id = session.event['user_id']
    return permissiongroup.perm_check(target_type, target_id, permgroupname, permunit)
def perm_del(session: CommandSession,permunit:str,Remotely:dict = None):
    """Revoke a permission unit, for a remote target or the session's own context."""
    if Remotely is not None:
        return permissiongroup.perm_del(Remotely['message_type'],
                                        Remotely['sent_id'],
                                        Remotely['op_id'],
                                        permgroupname,
                                        permunit)
    event = session.event
    target_id = event['group_id'] if event['message_type'] == 'group' else event['user_id']
    return permissiongroup.perm_del(event['message_type'],
                                    target_id,
                                    event['user_id'],
                                    permgroupname,
                                    permunit)
def perm_add(session: CommandSession,permunit:str,Remotely:dict = None):
    """Grant a permission unit, for a remote target or the session's own context."""
    if Remotely is not None:
        return permissiongroup.perm_add(Remotely['message_type'],
                                        Remotely['sent_id'],
                                        Remotely['op_id'],
                                        permgroupname,
                                        permunit)
    event = session.event
    target_id = event['group_id'] if event['message_type'] == 'group' else event['user_id']
    return permissiongroup.perm_add(event['message_type'],
                                    target_id,
                                    event['user_id'],
                                    permgroupname,
                                    permunit)
#预处理
def headdeal(session: CommandSession):
if session.event['message_type'] == "group" and session.event.sub_type != 'normal':
return False
return True
@on_command('transReloadTemplate',aliases=['重载烤推模版'], permission=perm.SUPERUSER,only_to_me = True)
async def transReloadTemplate(session: CommandSession):
    """Superuser command: re-read the translation templates from disk."""
    if not headdeal(session):
        return
    outcome = loadTranstemplate()
    if outcome[0]:
        await session.send('重载成功')
    else:
        # Forward the error message produced by data_read().
        await session.send(outcome[1])
async def transswitch_group(session: CommandSession):
    """Toggle the 'trans' permission for the group this command came from."""
    # A personal '-switch' ban on the operator vetoes the toggle...
    if perm_check(session,'-switch',user = True):
        await session.send('操作被拒绝,权限不足(p)')
        return
    # ...as does a '-switch' ban on the group itself.
    if perm_check(session,'-switch'):
        await session.send('操作被拒绝,权限不足(g)')
        return
    # A wildcard grant already covers 'trans'; toggling would be meaningless.
    if perm_check(session,'*'):
        await session.send('操作无效,存在“*”权限')
        return
    # Flip the 'trans' unit for this group.
    if perm_check(session,'trans'):
        perm_del(session,'trans')
        await session.send('烤推授权关闭')
    else:
        perm_add(session,'trans')
        await session.send('烤推授权开启')
async def transswitch_private(session: CommandSession):
    """Toggle the 'trans' permission for a remote group from a private chat.

    Expected arguments: "<群|群聊|group> <group id>".

    Fixes two bugs:
      * the parsed argument is declared as 'send_id' below, but was read
        back as args['sendid'] (KeyError);
      * the final perm_add() call was missing the ``remote`` argument, so
        the permission was granted to the operator's own private context
        instead of the target group.
    """
    user_id = session.event['user_id']
    arglimit = [
        {
            'name':'msgtype', # argument name
            'des':'消息类型', # description used in error messages
            'type':'str', # argument type: int float str list dict
            'strip':True, # strip whitespace first
            'lower':True, # lowercase before matching
            'default':None, # default value
            'func':None, # optional post-processing function
            're':None, # optional regex post-processing (match)
            'vlimit':{
                # value whitelist: only group targets may be toggled remotely
                '群聊':'group',
                'group':'group',
                '群':'group',
            }
        },
        {
            'name':'send_id', # argument name
            'des':'对象ID', # description used in error messages
            'type':'int',
            'strip':True,
            'lower':False,
            'default':None,
            'func':None,
            're':None,
            'vlimit':{
                # empty whitelist = any value accepted
            }
        }
    ]
    res = argDeal(session.current_arg_text.strip(),arglimit)
    if not res[0]:
        await session.send(res[1]+'=>'+res[2])
        return
    args = res[1]
    remote = {
        'message_type':'group',
        'sent_id':args['send_id'],  # BUGFIX: was args['sendid'] (KeyError)
        'op_id':user_id
    }
    # Operator-level '-switch' ban vetoes the toggle...
    if perm_check(session,'-switch'):
        await session.send('操作被拒绝,权限不足(p)')
        return
    # ...as does a '-switch' ban on the target group.
    if perm_check(session,'-switch',remote):
        await session.send('操作被拒绝,权限不足(g)')
        return
    # A wildcard grant already covers 'trans'; toggling would be meaningless.
    if perm_check(session,'*',remote):
        await session.send('操作无效,存在“*”权限(g)')
        return
    if perm_check(session,'trans',remote):
        perm_del(session,'trans',remote)
        await session.send('烤推授权关闭')
    else:
        perm_add(session,'trans',remote)  # BUGFIX: remote target was missing
        await session.send('烤推授权开启')
@on_command('transswitch',aliases=['ts','烤推授权'], permission=perm.SUPERUSER,only_to_me = True)
async def transswitch(session: CommandSession):
    """Dispatch the authorization toggle to the group or private handler."""
    if not headdeal(session):
        return
    if session.event['message_type'] == 'group':
        await transswitch_group(session)
    else:
        await transswitch_private(session)
def deal_trans(arg,ad) -> dict:
    """Parse raw translation markup into a trans dict.

    Returns {'type_html': '', 'source': arg, 'text': {...}} where 'text'
    maps a segment key ('main' or a tweet index as a string) to a list of
    one or two strings (the part after '#!' becomes the second element).
    Without any '##' separator the whole input is the 'main' segment.
    Returns None when a '##' segment does not start with a valid key.
    """
    parsed = {
        'type_html':'',
        'source':arg,
        'text':{}
    }
    segments = arg.split('##')
    if len(segments) == 1:
        head, _, tail = segments[0].partition("#!")
        entry = [head.strip()]
        if tail != '':
            entry.append(tail.strip())
        parsed['text']['main'] = entry
        return parsed
    for segment in segments:
        segment = segment.strip()
        if not segment:
            continue
        # Each segment must start with "<key> " where key is 1-2 digits,
        # 'main', or the shorthand 'm'.
        matches = re.findall(r'^([0-9]{1,2}|main|m)\s{1}(.+)',segment,re.S)
        if not matches:
            return None  # malformed segment
        key, value = matches[0]
        if key.isnumeric():
            key = str(int(key))  # normalize '02' -> '2'
        elif key == 'm':
            key = 'main'
        head, _, tail = value.partition("#!")
        entry = [head.strip()]
        if tail != '':
            entry.append(tail.strip())
        parsed['text'][key] = entry
    return parsed
def send_msg(session: CommandSession,msg):
    """Synchronously push msg to the group this session originated from."""
    session.bot.sync.send_msg(
        self_id=session.self_id,
        group_id=session.event['group_id'],
        message=msg,
    )
def send_res(session: CommandSession, args):
    """Worker (runs in the thread pool): render the translated tweet image
    and report the result back to the originating group.

    args: {'tweet_id': ..., 'trans': dict} as produced by the trans command.
    Fixes: bare ``except:`` narrowed to ``except Exception:`` so the worker
    no longer swallows SystemExit/KeyboardInterrupt; removed the redundant
    read-only ``global transtemplate``, the ``tweet_cache`` alias and the
    superfluous ``del tt``.
    """
    group_id = session.event['group_id']
    user_id = session.event['user_id']
    tweet_id = args['tweet_id']
    trans = args['trans']
    try:
        # Task id derived from the current time, base64-style encoded to
        # keep the generated file name short.
        tasktype = encode_b64(int(time.time()), offset=0)
        # Pick the per-group footer template, falling back to the default.
        type_html = transtemplate['0']
        if str(group_id) in transtemplate:
            type_html = transtemplate[str(group_id)]
        trans['type_html'] = type_html
        # Use the cached tweet (if any) to recover the author's screen name.
        tweet_sname = 's'
        tweet = tweet_event_deal.tryGetTweet(tweet_id)
        if tweet is not None:
            logger.info('检测到缓存:' + tweet['id_str'] + '(' + tweet['user']['screen_name'] + ')')
            tweet_sname = tweet['user']['screen_name']
        tt = TweetTrans()
        res = tt.getTransFromTweetID(
            str(tweet_id),
            args['trans'],
            tweet_sname,
            encode_b64(group_id, offset=0) + '-' + str(tasktype)
        )
        if res[0]:
            time.sleep(1)
            if 'nickname' in session.event.sender:
                nick = session.event.sender['nickname']
            else:
                nick = str(user_id)
            # Record the finished job in the on-disk history.
            trans_tmemory.join({
                'id': tweet_id,
                'group': group_id,
                'mintrans': trans['source'][0:15].replace("\n", " "),
                'tweetid': encode_b64(tweet_id),
                'tasktype': tasktype,
                'trans': trans,
                'op': user_id,
                'opnick': nick
            })
            send_msg(session,
                trans_img_path + encode_b64(group_id, offset=0) + '-' + str(tasktype) + '.png' + "\n" + \
                str('[CQ:image,timeout=' + config.img_time_out + \
                ',file=' + trans_img_path + encode_b64(group_id, offset=0) + '-' + str(tasktype) + '.png' + ']') + "\n"\
                "使用 !tl 查看烤推历史"
            )
        else:
            send_msg(session, "错误," + res[2])
    except Exception:
        logger.error(traceback.format_exc(limit=10))
        send_msg(session, "错误,烤推服务异常!")
@on_command('trans',aliases=['t','烤推'], permission=perm.SUPERUSER | perm.PRIVATE_FRIEND | perm.GROUP_OWNER | perm.GROUP,only_to_me = False)
async def trans(session: CommandSession):
    """Handle "!t <tweet id> <translation>": validate, then queue image rendering."""
    if not headdeal(session):
        return
    message_type = session.event['message_type']
    #group_id = (session.event['group_id'] if message_type == 'group' else None)
    #user_id = session.event['user_id']
    # Group chats only.
    if message_type != 'group':
        return
    if perm_check(session,'-switch',user = True):
        await session.send('操作被拒绝,权限不足(p)')
        return
    if not perm_check(session,'trans'):
        await session.send('操作未授权')
        return
    logger.info(CQsessionToStr(session))
    # Shared token-bucket rate limit for the rendering backend.
    if not rate_limit_bucket.consume(1):
        await session.send("烤推繁忙,请稍后再试")
        return
    def checkTweetId(a,ad):
        # Resolve user input to a tweet id: "#<n>" looks up the short local
        # index, a long decimal is accepted as-is, anything else is tried as
        # a base64-encoded id. Returns None when it cannot be resolved.
        if a[:1] == '#':
            ta = a[1:]
            if not ta.isdecimal():
                return None
            res = mintweetID.find(lambda item,val:item[1]==val,int(ta))
            if res == None:
                return None
            return res[0]
        elif a.isdecimal() and int(a) > 1253881609540800000:
            return a
        else:
            res = decode_b64(a)
            if res == -1:
                return None
            return res
    arglimit = [
        {
            'name':'tweet_id', # parameter name
            'des':'推特ID', # description used in error replies
            'type':'int', # type: int float str list dict (list/dict need a func or regex for post-processing)
            'strip':True, # whether to strip()
            'lower':False, # whether to lowercase
            'default':None, # default value
            'func':checkTweetId, # optional post-processing function
            're':None, # optional regex (used with match)
            'vlimit':{
                # allowed-value table (empty = unrestricted); '*':'' matches any
                # string; a non-empty value rewrites the matched string to it
            }
        },{
            'name':'trans', # parameter name
            'des':'翻译内容', # description used in error replies
            'type':'dict', # type: int float str list dict (list/dict need a func or regex for post-processing)
            'strip':True, # whether to strip()
            'lower':False, # whether to lowercase
            'default':{
                'type_html':'',
                'source':'',
                'text':{}
            }, # default value
            'func':deal_trans, # optional post-processing function
            're':None, # optional regex (used with match)
            'vlimit':{
                # allowed-value table (empty = unrestricted); '*':'' matches any
                # string; a non-empty value rewrites the matched string to it
            }
        }
    ]
    args = argDeal(session.current_arg_text.strip(),arglimit)
    if not args[0]:
        await session.send(args[1] + '=>' + args[2])
        return
    # Render off the event loop; send_res reports back when done.
    pool.submit(send_res,session,args[1])
    await session.send("图片合成中...")
def getlist(groupid:int,page:int=1):
    """Build a text listing of cached translation tasks for one group.
    Entries are listed newest first, five per page.
    :param groupid: QQ group whose tasks should be listed
    :param page: 1-based page number
    :return: formatted multi-line string
    """
    entries = trans_tmemory.tm.copy()
    start, stop = (page - 1) * 5, page * 5
    s = "昵称,任务标识,推文标识,翻译简写\n"
    matched = 0
    for entry in reversed(entries):
        if entry['group'] != groupid:
            continue
        if start <= matched < stop:
            nick = str(entry['opnick'] if entry['opnick'] else entry['op'])
            s += nick + ',' + entry['tasktype'] + ',' + entry['tweetid'] + ',' + entry['mintrans'] + "\n"
        matched += 1
    totalpage = matched // 5 + (0 if matched % 5 == 0 else 1)
    s += '页数:' + str(page) + '/' + str(totalpage) + '总记录数:' + str(matched) + '\n'
    s += '使用!tgt 任务标识 获取指定任务图片' + "\n"
    s += '使用!gt 推文标识 获取指定推文最后的译文图片'
    return s
@on_command('translist',aliases=['tl','烤推列表'], permission=perm.SUPERUSER | perm.PRIVATE_FRIEND | perm.GROUP_OWNER | perm.GROUP,only_to_me = False)
async def translist(session: CommandSession):
    """Handle "!tl [page]": list this group's cached translation tasks."""
    if not headdeal(session):
        return
    message_type = session.event['message_type']
    group_id = (session.event['group_id'] if message_type == 'group' else None)
    #user_id = session.event['user_id']
    # Group chats only.
    if message_type != 'group':
        return
    if perm_check(session,'-switch',user = True):
        await session.send('操作被拒绝,权限不足(p)')
        return
    if not perm_check(session,'trans'):
        await session.send('操作未授权')
        return
    logger.info(CQsessionToStr(session))
    arglimit = [
        {
            'name':'page', # parameter name
            'des':'页码', # description used in error replies
            'type':'int', # type: int float str list dict (list/dict need a func or regex for post-processing)
            'strip':True, # whether to strip()
            'lower':False, # whether to lowercase
            'default':1, # default value
            'func':None, # optional post-processing function
            're':None, # optional regex (used with match)
            'vlimit':{
                # allowed-value table (empty = unrestricted); '*':'' matches any
                # string; a non-empty value rewrites the matched string to it
            }
        }
    ]
    args = argDeal(session.current_arg_text.strip(),arglimit)
    if not args[0]:
        await session.send(args[1] + '=>' + args[2])
        return
    args = args[1]
    page = args['page']
    if page < 1:
        await session.send("页码不能为负")
        return
    s = getlist(group_id,page)
    await session.send(s)
@on_command('gettrans',aliases=['gt','获取翻译'], permission=perm.SUPERUSER | perm.PRIVATE_FRIEND | perm.GROUP_OWNER | perm.GROUP,only_to_me = False)
async def gettrans(session: CommandSession):
    """Handle "!gt <tweet id>": resend the latest rendered image for a tweet."""
    if not headdeal(session):
        return
    message_type = session.event['message_type']
    #group_id = (session.event['group_id'] if message_type == 'group' else None)
    #user_id = session.event['user_id']
    # Group chats only.
    if message_type != 'group':
        return
    if perm_check(session,'-switch',user = True):
        await session.send('操作被拒绝,权限不足(p)')
        return
    if not perm_check(session,'trans'):
        await session.send('操作未授权')
        return
    logger.info(CQsessionToStr(session))
    def checkTweetId(a,ad):
        # Same resolution rules as the !t command: "#<n>" short index,
        # long decimal id, or base64-encoded id; None when unresolvable.
        if a[:1] == '#':
            ta = a[1:]
            if not ta.isdecimal():
                return None
            res = mintweetID.find(lambda item,val:item[1]==val,int(ta))
            if res == None:
                return None
            return res[0]
        elif a.isdecimal() and int(a) > 1253881609540800000:
            return a
        else:
            res = decode_b64(a)
            if res == -1:
                return None
            return res
    arglimit = [
        {
            'name':'tweet_id', # parameter name
            'des':'推特ID', # description used in error replies
            'type':'int', # type: int float str list dict (list/dict need a func or regex for post-processing)
            'strip':True, # whether to strip()
            'lower':False, # whether to lowercase
            'default':None, # default value
            'func':checkTweetId, # optional post-processing function
            're':None, # optional regex (used with match)
            'vlimit':{
                # allowed-value table (empty = unrestricted); '*':'' matches any
                # string; a non-empty value rewrites the matched string to it
            }
        }
    ]
    args = argDeal(session.current_arg_text.strip(),arglimit)
    if not args[0]:
        await session.send(args[1] + '=>' + args[2])
        return
    args = args[1]
    tweet_id = args['tweet_id']
    ttm = trans_tmemory.tm.copy()
    length = len(ttm)
    # Walk the history newest-first and reply with the first match.
    for i in range(length - 1,-1,-1):
        if ttm[i]['id'] == tweet_id:
            await session.send(trans_img_path + encode_b64(ttm[i]['group'],offset=0)+'-'+str(ttm[i]['tasktype']) + '.png' +"\n" + \
                str('[CQ:image,timeout=' + config.img_time_out + \
                ',file='+trans_img_path + encode_b64(ttm[i]['group'],offset=0)+'-'+str(ttm[i]['tasktype']) + '.png' + ']'))
            return
    await session.send("未查找到推文翻译")
@on_command('typeGettrans',aliases=['tgt'], permission=perm.SUPERUSER | perm.PRIVATE_FRIEND | perm.GROUP_OWNER | perm.GROUP,only_to_me = False)
async def typeGettrans(session: CommandSession):
    """Handle "!tgt <task id>": resend the rendered image of one specific task."""
    if not headdeal(session):
        return
    message_type = session.event['message_type']
    #group_id = (session.event['group_id'] if message_type == 'group' else None)
    #user_id = session.event['user_id']
    # Group chats only.
    if message_type != 'group':
        return
    if perm_check(session,'-switch',user = True):
        await session.send('操作被拒绝,权限不足(p)')
        return
    if not perm_check(session,'trans'):
        await session.send('操作未授权')
        return
    logger.info(CQsessionToStr(session))
    arg = session.current_arg_text.strip()
    if arg == '':
        await session.send('缺少参数')
        return
    ttm = trans_tmemory.tm.copy()
    length = len(ttm)
    # Walk the history newest-first and reply with the matching task image.
    for i in range(length - 1,-1,-1):
        if ttm[i]['tasktype'] == arg:
            await session.send(trans_img_path + encode_b64(ttm[i]['group'],offset=0)+'-'+str(ttm[i]['tasktype']) + '.png' +"\n" + \
                str('[CQ:image,timeout=' + config.img_time_out + \
                ',file='+trans_img_path + encode_b64(ttm[i]['group'],offset=0)+'-'+str(ttm[i]['tasktype']) + '.png' + ']'))
            return
    await session.send("未查找到推文翻译")
@on_command('transabout',aliases=['ta','烤推帮助'],only_to_me = False)
async def transabout(session: CommandSession):
    """Handle "!ta": show version, authorization status and command help."""
    if not headdeal(session):
        return
    message_type = session.event['message_type']
    # Group chats only.
    if message_type != 'group':
        return
    # Only used to display the authorization status; never blocks the help.
    res = perm_check(session,'trans')
    logger.info(CQsessionToStr(session))
    msg = '当前版本为烤推机测试版V2.33' + "\n" + \
        '授权状态:' + ("已授权" if res else "未授权") + "\n" + \
        '!ts -切换烤推授权' + "\n" + \
        '!t 推文ID 翻译 -合成翻译' + "\n" + \
        '!tl -已翻译推文列表' + "\n" + \
        '!gt 推文ID/推文标识 -获取最后翻译' + "\n" + \
        '!tgt 任务标识 -获取指定翻译' + "\n" + \
        '!gtt 推文ID/推文标识 -获取指定推文内容' + "\n" + \
        '多层回复翻译:' + "\n" + \
        '##1 第一层翻译' + "\n" + \
        '#! 第一层层内推文(转推并评论类型里的内嵌推文)' + "\n" + \
        '##2 第二层翻译' + "\n" + \
        '##main 主翻译' + "\n" + \
        '烤推支持换行参数,如有需要可以更换翻译自日文到任意图片或文字' + "\n" + \
        '如果出现问题可以 !反馈 反馈内容 反馈信息'
    await session.send(msg)
6288ac0fdd26575c92d10ae5072077a498932e27 | 6,206 | py | Python | ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapred_service_check.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 371 | 2015-01-19T05:42:51.000Z | 2022-03-27T14:46:52.000Z | ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapred_service_check.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 491 | 2015-03-10T17:25:02.000Z | 2022-03-30T12:22:44.000Z | ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapred_service_check.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 395 | 2015-01-02T20:53:01.000Z | 2022-03-21T08:49:08.000Z | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
from resource_management.libraries.functions.format import format
from resource_management.core.resources.system import Execute, File
from resource_management.core.source import StaticFile
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core.logger import Logger
class MapReduce2ServiceCheck(Script):
  """Base service check; concrete behavior is provided by the
  OS-specific subclasses decorated with @OsFamilyImpl below."""
  def service_check(self, env):
    # Intentionally a no-op in the base class.
    pass
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class MapReduce2ServiceCheckWindows(MapReduce2ServiceCheck):
  """Windows implementation: smoke-tests the History Server web UI by
  running the validateYarnComponentStatusWindows.py helper script."""
  def service_check(self, env):
    """Stage the validation script and execute it (with kinit first when
    security is enabled). Execute() raises on a non-zero exit code."""
    import params
    # Fix: `os` was used below (os.path.join / os.path.dirname) but never
    # imported anywhere in this module, causing a NameError at runtime.
    import os
    env.set_params(params)

    component_type = 'hs'
    # NOTE(review): the original if/else assigned params.hs_webui_address in
    # both the SSL and the non-SSL branch, so the conditional was a no-op;
    # the SSL flag is still passed to the script via the -s option below.
    component_address = params.hs_webui_address

    validateStatusFileName = "validateYarnComponentStatusWindows.py"
    validateStatusFilePath = os.path.join(os.path.dirname(params.hadoop_home), "temp", validateStatusFileName)
    python_executable = sys.executable
    validateStatusCmd = "{0} {1} {2} -p {3} -s {4}".format(
      python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)

    if params.security_enabled:
      # Obtain a Kerberos ticket for the smoke user before probing the UI.
      kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
      smoke_cmd = kinit_cmd + validateStatusCmd
    else:
      smoke_cmd = validateStatusCmd

    # Copy the helper script out of the service package to the target path.
    File(validateStatusFilePath,
         content=StaticFile(validateStatusFileName)
    )

    Execute(smoke_cmd,
            tries=3,
            try_sleep=5,
            logoutput=True
    )
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class MapReduce2ServiceCheckDefault(MapReduce2ServiceCheck):
  """Default (Linux) implementation: runs the MapReduce wordcount example
  on a small HDFS input and verifies the output directory exists."""
  def service_check(self, env):
    """Stage /etc/passwd as input, run wordcount, then `fs -test -e` the
    output path. Each Execute/ExecuteHadoop raises on failure."""
    import params
    env.set_params(params)
    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
    input_file = format("/user/{smokeuser}/mapredsmokeinput")
    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
    test_cmd = format("fs -test -e {output_file}")
    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
    # Declare the HDFS layout: smoke-user home, fresh output dir, input file.
    # Nothing runs until the final HdfsResource(None, action="execute").
    params.HdfsResource(format("/user/{smokeuser}"),
                      type="directory",
                      action="create_on_execute",
                      owner=params.smokeuser,
                      mode=params.smoke_hdfs_user_mode,
                      )
    params.HdfsResource(output_file,
                        action = "delete_on_execute",
                        type = "directory",
                        dfs_type = params.dfs_type,
                        )
    params.HdfsResource(input_file,
                        action = "create_on_execute",
                        type = "file",
                        source = "/etc/passwd",
                        dfs_type = params.dfs_type,
                        )
    params.HdfsResource(None, action="execute")
    # initialize the ticket
    if params.security_enabled:
      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
      Execute(kinit_cmd, user=params.smokeuser)
    ExecuteHadoop(run_wordcount_job,
                  tries=1,
                  try_sleep=5,
                  user=params.smokeuser,
                  bin_dir=params.execute_path,
                  conf_dir=params.hadoop_conf_dir,
                  logoutput=True)
    # the ticket may have expired, so re-initialize
    if params.security_enabled:
      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
      Execute(kinit_cmd, user=params.smokeuser)
    ExecuteHadoop(test_cmd,
                  user=params.smokeuser,
                  bin_dir=params.execute_path,
                  conf_dir=params.hadoop_conf_dir)
if __name__ == "__main__":
  # Script.execute() presumably dispatches to the OS-specific subclass
  # registered via the @OsFamilyImpl decorators above — confirm in
  # resource_management.libraries.script.
  MapReduce2ServiceCheck().execute()
| 35.872832 | 112 | 0.670641 |
53c9070a1079354bf87c168f4d478de283e3362d | 1,496 | py | Python | src/day2.py | TobiasRoeding/advent-of-code-2021 | 3db16d52ad9f4f04ac7f43087f6f504dca41cc43 | [
"Unlicense"
] | null | null | null | src/day2.py | TobiasRoeding/advent-of-code-2021 | 3db16d52ad9f4f04ac7f43087f6f504dca41cc43 | [
"Unlicense"
] | null | null | null | src/day2.py | TobiasRoeding/advent-of-code-2021 | 3db16d52ad9f4f04ac7f43087f6f504dca41cc43 | [
"Unlicense"
] | null | null | null | from src.utils import read_array_from_file
class Day2:
    """Advent of Code 2021, day 2: steer the submarine from a command list."""
    def __init__(self, input="src/input/day2.txt"):
        self.INPUT = input
    def part1(self):
        """Solve part 1: up/down change depth directly; return horizontal * depth."""
        commands = [line.split(" ") for line in read_array_from_file(self.INPUT)]
        horizontal, depth = 0, 0
        for direction, amount in commands:
            step = int(amount)
            if direction == "forward":
                horizontal += step
            elif direction == "up":
                depth -= step
            elif direction == "down":
                depth += step
        print(f"horizontal position: {horizontal}, depth: {depth}")
        return horizontal * depth
    def part2(self):
        """Solve part 2: up/down change aim; forward moves and dives; return horizontal * depth."""
        commands = [line.split(" ") for line in read_array_from_file(self.INPUT)]
        horizontal, depth, aim = 0, 0, 0
        for direction, amount in commands:
            step = int(amount)
            if direction == "forward":
                horizontal += step
                depth += step * aim
            elif direction == "up":
                aim -= step
            elif direction == "down":
                aim += step
        print(f"horizontal position: {horizontal}, depth: {depth}, aim: {aim}")
        return horizontal * depth
    def execute(self):
        """Print the solutions for both puzzle parts."""
        print(f"Solution for part 1: {self.part1()}")
        print(f"Solution for part 2: {self.part2()}")
Day2().execute()
| 25.793103 | 79 | 0.524733 |
13abe9fc26aa87fac991559eeb3522cd2cff305f | 15,934 | py | Python | gammapy/irf/psf/map.py | AtreyeeS/gammapy | a3b47c3da08900a833f0360e0374203e054cadfc | [
"BSD-3-Clause"
] | null | null | null | gammapy/irf/psf/map.py | AtreyeeS/gammapy | a3b47c3da08900a833f0360e0374203e054cadfc | [
"BSD-3-Clause"
] | 1 | 2017-07-04T15:03:24.000Z | 2017-09-11T08:44:14.000Z | gammapy/irf/psf/map.py | AtreyeeS/gammapy | a3b47c3da08900a833f0360e0374203e054cadfc | [
"BSD-3-Clause"
] | 1 | 2022-03-05T15:56:38.000Z | 2022-03-05T15:56:38.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import astropy.units as u
from astropy.visualization import quantity_support
from gammapy.maps import Map, MapCoord, WcsGeom, MapAxis
from gammapy.modeling.models import PowerLawSpectralModel
from gammapy.utils.random import InverseCDFSampler, get_random_state
from gammapy.utils.gauss import Gauss2DPDF
from .kernel import PSFKernel
from .core import PSF
from ..core import IRFMap
__all__ = ["PSFMap"]
class IRFLikePSF(PSF):
    """Helper PSF whose extra ``lat_idx``/``lon_idx`` axes carry the pixel
    indices of a parent `PSFMap`, enabling vectorized per-pixel evaluation
    (see ``PSFMap._psf_irf``)."""
    required_axes = ["energy_true", "rad", "lat_idx", "lon_idx"]
    tag = "irf_like_psf"
class PSFMap(IRFMap):
    """Class containing the Map of PSFs and allowing to interact with it.
    Parameters
    ----------
    psf_map : `~gammapy.maps.Map`
        the input PSF Map. Should be a Map with 2 non spatial axes.
        rad and true energy axes should be given in this specific order.
    exposure_map : `~gammapy.maps.Map`
        Associated exposure map. Needs to have a consistent map geometry.
    Examples
    --------
    ::
        from astropy.coordinates import SkyCoord
        from gammapy.maps import WcsGeom, MapAxis
        from gammapy.data import Observation
        from gammapy.irf import load_cta_irfs
        from gammapy.makers import MapDatasetMaker
        # Define observation
        pointing = SkyCoord("0d", "0d")
        irfs = load_cta_irfs("$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits")
        obs = Observation.create(pointing=pointing, irfs=irfs, livetime="1h")
        # Create WcsGeom
        # Define energy axis. Note that the name is fixed.
        energy_axis = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=3, name="energy_true")
        # Define rad axis. Again note the axis name
        rad_axis = MapAxis.from_bounds(0, 0.5, nbin=100, name="rad", unit="deg")
        geom = WcsGeom.create(
            binsz=0.25, width="5 deg", skydir=pointing, axes=[rad_axis, energy_axis]
        )
        maker = MapDatasetMaker()
        psf = maker.make_psf(geom=geom, observation=obs)
        # Get a PSF kernel at the center of the image
        geom=exposure_geom.upsample(factor=10).drop("rad")
        psf_kernel = psf_map.get_psf_kernel(geom=geom)
    """
    tag = "psf_map"
    required_axes = ["rad", "energy_true"]
    def __init__(self, psf_map, exposure_map=None):
        super().__init__(irf_map=psf_map, exposure_map=exposure_map)
    @property
    def psf_map(self):
        # Alias for the generic IRFMap storage attribute.
        return self._irf_map
    @psf_map.setter
    def psf_map(self, value):
        self._irf_map = value
    def normalize(self):
        """Normalize PSF map"""
        self.psf_map.normalize(axis_name="rad")
    @classmethod
    def from_geom(cls, geom):
        """Create psf map from geom.
        Parameters
        ----------
        geom : `Geom`
            PSF map geometry.
        Returns
        -------
        psf_map : `PSFMap`
            Point spread function map.
        """
        # The exposure map shares the geometry but with the rad axis squashed.
        geom_exposure = geom.squash(axis_name="rad")
        exposure_psf = Map.from_geom(geom_exposure, unit="m2 s")
        psf_map = Map.from_geom(geom, unit="sr-1")
        return cls(psf_map, exposure_psf)
    # TODO: this is a workaround for now, probably add Map.integral() or similar
    @property
    def _psf_irf(self):
        # Wrap the map data in an IRF-like object whose lon/lat axes are the
        # pixel indices, so PSF methods can be evaluated per pixel.
        geom = self.psf_map.geom
        npix_x, npix_y = geom.npix
        axis_lon = MapAxis.from_edges(np.arange(npix_x + 1) - 0.5, name="lon_idx")
        axis_lat = MapAxis.from_edges(np.arange(npix_y + 1) - 0.5, name="lat_idx")
        return IRFLikePSF(
            axes=[geom.axes["energy_true"], geom.axes["rad"], axis_lat, axis_lon],
            data=self.psf_map.data,
            unit=self.psf_map.unit,
        )
    def _get_irf_coords(self, **kwargs):
        # Translate sky coordinates into the pixel-index coordinates used by
        # the `_psf_irf` helper above.
        coords = MapCoord.create(kwargs)
        geom = self.psf_map.geom.to_image()
        lon_pix, lat_pix = geom.coord_to_pix((coords.lon, coords.lat))
        coords_irf = {
            "lon_idx": lon_pix,
            "lat_idx": lat_pix,
            "energy_true": coords["energy_true"],
        }
        try:
            # "rad" is optional (e.g. not needed for containment_radius).
            coords_irf["rad"] = coords["rad"]
        except KeyError:
            pass
        return coords_irf
    def containment(self, rad, energy_true, position=None):
        """Containment at given coords
        Parameters
        ----------
        rad : `~astropy.units.Quantity`
            Rad value
        energy_true : `~astropy.units.Quantity`
            Energy true value
        position : `~astropy.coordinates.SkyCoord`
            Sky position. By default the center of the map is chosen
        Returns
        -------
        containment : `~astropy.units.Quantity`
            Containment values
        """
        if position is None:
            position = self.psf_map.geom.center_skydir
        coords = {"skycoord": position, "rad": rad, "energy_true": energy_true}
        # Integral over rad up to `rad` gives the enclosed (dimensionless) fraction.
        return self.psf_map.integral(axis_name="rad", coords=coords).to("")
    def containment_radius(self, fraction, energy_true, position=None):
        """Containment at given coords
        Parameters
        ----------
        fraction : float
            Containment fraction
        energy_true : `~astropy.units.Quantity`
            Energy true value
        position : `~astropy.coordinates.SkyCoord`
            Sky position. By default the center of the map is chosen
        Returns
        -------
        containment : `~astropy.units.Quantity`
            Containment values
        """
        if position is None:
            position = self.psf_map.geom.center_skydir
        coords = self._get_irf_coords(energy_true=energy_true, skycoord=position)
        return self._psf_irf.containment_radius(fraction, **coords)
    def containment_radius_map(self, energy_true, fraction=0.68):
        """Containment radius map.
        Parameters
        ----------
        energy_true : `~astropy.units.Quantity`
            Energy at which to compute the containment radius
        fraction : float
            Containment fraction (range: 0 to 1)
        Returns
        -------
        containment_radius_map : `~gammapy.maps.Map`
            Containment radius map
        """
        geom = self.psf_map.geom.to_image()
        data = self.containment_radius(
            fraction=fraction,
            energy_true=energy_true,
            position=geom.get_coord().skycoord,
        )
        return Map.from_geom(geom=geom, data=data.value, unit=data.unit)
    def get_psf_kernel(self, geom, position=None, max_radius=None, containment=0.999, factor=4):
        """Returns a PSF kernel at the given position.
        The PSF is returned in the form a WcsNDMap defined by the input Geom.
        Parameters
        ----------
        geom : `~gammapy.maps.Geom`
            Target geometry to use
        position : `~astropy.coordinates.SkyCoord`
            Target position. Should be a single coordinate. By default the
            center position is used.
        max_radius : `~astropy.coordinates.Angle`
            maximum angular size of the kernel map
        containment : float
            Containment fraction to use as size of the kernel. The max. radius
            across all energies is used. The radius can be overwritten using
            the `max_radius` argument.
        factor : int
            oversampling factor to compute the PSF
        Returns
        -------
        kernel : `~gammapy.irf.PSFKernel`
            the resulting kernel
        """
        # TODO: try to simplify...is the oversampling needed?
        if position is None:
            position = self.psf_map.geom.center_skydir
        position = self._get_nearest_valid_position(position)
        if max_radius is None:
            # Default kernel size: largest containment radius over all energies.
            energy_axis = self.psf_map.geom.axes["energy_true"]
            radii = self.containment_radius(
                fraction=containment,
                position=position,
                energy_true=energy_axis.center
            )
            max_radius = np.max(radii)
        geom = geom.to_odd_npix(max_radius=max_radius)
        # Interpolate on an oversampled grid, then downsample conserving counts.
        geom_upsampled = geom.upsample(factor=factor)
        coords = geom_upsampled.get_coord(sparse=True)
        rad = coords.skycoord.separation(geom.center_skydir)
        coords = {"energy_true": coords["energy_true"], "rad": rad, "skycoord": position}
        data = self.psf_map.interp_by_coord(coords=coords, method="linear",)
        # Clip negative interpolation artifacts before normalizing the kernel.
        kernel_map = Map.from_geom(geom=geom_upsampled, data=np.clip(data, 0, np.inf))
        kernel_map = kernel_map.downsample(factor, preserve_counts=True)
        return PSFKernel(kernel_map, normalize=True)
    def sample_coord(self, map_coord, random_state=0):
        """Apply PSF corrections on the coordinates of a set of simulated events.
        Parameters
        ----------
        map_coord : `~gammapy.maps.MapCoord` object.
            Sequence of coordinates and energies of sampled events.
        random_state : {int, 'random-seed', 'global-rng', `~numpy.random.RandomState`}
            Defines random number generator initialisation.
            Passed to `~gammapy.utils.random.get_random_state`.
        Returns
        -------
        corr_coord : `~gammapy.maps.MapCoord` object.
            Sequence of PSF-corrected coordinates of the input map_coord map.
        """
        random_state = get_random_state(random_state)
        rad_axis = self.psf_map.geom.axes["rad"]
        coord = {
            "skycoord": map_coord.skycoord.reshape(-1, 1),
            "energy_true": map_coord["energy_true"].reshape(-1, 1),
            "rad": rad_axis.center,
        }
        # Weight the PSF profile by rad and bin width to obtain a per-bin
        # probability along the rad axis.
        pdf = (
            self.psf_map.interp_by_coord(coord)
            * rad_axis.center.value
            * rad_axis.bin_width.value
        )
        sample_pdf = InverseCDFSampler(pdf, axis=1, random_state=random_state)
        pix_coord = sample_pdf.sample_axis()
        separation = rad_axis.pix_to_coord(pix_coord)
        # NOTE(review): uniform(360) sets low=360, high=1.0 in
        # numpy.random.RandomState.uniform — likely intended
        # uniform(0, 360) for a uniform position angle; verify.
        position_angle = random_state.uniform(360, size=len(map_coord.lon)) * u.deg
        event_positions = map_coord.skycoord.directional_offset_by(
            position_angle=position_angle, separation=separation
        )
        return MapCoord.create(
            {"skycoord": event_positions, "energy_true": map_coord["energy_true"]}
        )
    @classmethod
    def from_gauss(cls, energy_axis_true, rad_axis=None, sigma=0.1 * u.deg, geom=None):
        """Create all -sky PSF map from Gaussian width.
        This is used for testing and examples.
        The width can be the same for all energies
        or be an array with one value per energy node.
        It does not depend on position.
        Parameters
        ----------
        energy_axis_true : `~gammapy.maps.MapAxis`
            True energy axis.
        rad_axis : `~gammapy.maps.MapAxis`
            Offset angle wrt source position axis.
        sigma : `~astropy.coordinates.Angle`
            Gaussian width.
        geom : `Geom`
            Image geometry. By default an allsky geometry is created.
        Returns
        -------
        psf_map : `PSFMap`
            Point spread function map.
        """
        # Local import to avoid a circular dependency with gammapy.datasets.
        from gammapy.datasets.map import RAD_AXIS_DEFAULT
        if rad_axis is None:
            rad_axis = RAD_AXIS_DEFAULT.copy()
        if geom is None:
            geom = WcsGeom.create(npix=(2, 1), proj="CAR", binsz=180,)
        geom = geom.to_cube([rad_axis, energy_axis_true])
        coords = geom.get_coord(sparse=True)
        # Reshape sigma so one value per energy node broadcasts over the cube.
        sigma = u.Quantity(sigma).reshape((-1, 1, 1, 1))
        gauss = Gauss2DPDF(sigma=sigma)
        data = gauss(coords["rad"]) * np.ones(geom.data_shape)
        psf_map = Map.from_geom(geom=geom, data=data.to_value("sr-1"), unit="sr-1")
        exposure_map = Map.from_geom(
            geom=geom.squash(axis_name="rad"), unit="m2 s", data=1.
        )
        return cls(psf_map=psf_map, exposure_map=exposure_map)
    def to_image(self, spectrum=None, keepdims=True):
        """Reduce to a 2-D map after weighing
        with the associated exposure and a spectrum
        Parameters
        ----------
        spectrum : `~gammapy.modeling.models.SpectralModel`, optional
            Spectral model to compute the weights.
            Default is power-law with spectral index of 2.
        keepdims : bool, optional
            If True, the energy axis is kept with one bin.
            If False, the axis is removed
        Returns
        -------
        psf_out : `PSFMap`
            `PSFMap` with the energy axis summed over
        """
        # Local import to avoid a circular dependency with gammapy.makers.
        from gammapy.makers.utils import _map_spectrum_weight
        if spectrum is None:
            spectrum = PowerLawSpectralModel(index=2.0)
        exp_weighed = _map_spectrum_weight(self.exposure_map, spectrum)
        exposure = exp_weighed.sum_over_axes(
            axes_names=["energy_true"], keepdims=keepdims
        )
        # Exposure-weighted average of the PSF over the true-energy axis.
        psf_data = exp_weighed.data * self.psf_map.data / exposure.data
        psf_map = Map.from_geom(geom=self.psf_map.geom, data=psf_data, unit="sr-1")
        psf = psf_map.sum_over_axes(axes_names=["energy_true"], keepdims=keepdims)
        return self.__class__(psf_map=psf, exposure_map=exposure)
    def plot_containment_radius_vs_energy(
        self, ax=None, fraction=[0.68, 0.95], **kwargs
    ):
        """Plot containment fraction as a function of energy.
        The method plots the containment radius at the center of the map.
        Parameters
        ----------
        ax : `~matplotlib.pyplot.Axes`
            Axes to plot on.
        fraction : list of float or `~numpy.ndarray`
            Containment fraction between 0 and 1.
        **kwargs : dict
            Keyword arguments passed to `~matplotlib.pyplot.plot`
        Returns
        -------
        ax : `~matplotlib.pyplot.Axes`
            Axes to plot on.
        """
        import matplotlib.pyplot as plt
        ax = plt.gca() if ax is None else ax
        position = self.psf_map.geom.center_skydir
        energy_true = self.psf_map.geom.axes["energy_true"].center
        for frac in fraction:
            radius = self.containment_radius(
                energy_true=energy_true, position=position, fraction=frac
            )
            label = f"Containment: {100 * frac:.1f}%"
            with quantity_support():
                ax.plot(energy_true, radius, label=label, **kwargs)
        ax.semilogx()
        ax.legend(loc="best")
        ax.set_xlabel(f"Energy ({ax.xaxis.units})")
        ax.set_ylabel(f"Containment radius ({ax.yaxis.units})")
        return ax
    def plot_psf_vs_rad(self, ax=None, energy_true=[0.1, 1, 10] * u.TeV, **kwargs):
        """Plot PSF vs radius.
        The method plots the profile at the center of the map.
        Parameters
        ----------
        ax : `~matplotlib.pyplot.Axes`
            Axes to plot on.
        energy_true : `~astropy.units.Quantity`
            Energies where to plot the PSF.
        **kwargs : dict
            Keyword arguments pass to `~matplotlib.pyplot.plot`.
        Returns
        -------
        ax : `~matplotlib.pyplot.Axes`
            Axes to plot on.
        """
        import matplotlib.pyplot as plt
        ax = plt.gca() if ax is None else ax
        rad = self.psf_map.geom.axes["rad"].center
        for value in energy_true:
            psf_value = self.psf_map.interp_by_coord(
                {
                    "skycoord": self.psf_map.geom.center_skydir,
                    "energy_true": value,
                    "rad": rad,
                }
            )
            label = f"{value:.0f}"
            with quantity_support():
                ax.plot(rad, psf_value, label=label, **kwargs)
        ax.set_yscale("log")
        ax.set_xlabel(f"Rad ({ax.xaxis.units})")
        ax.set_ylabel(f"PSF ({ax.yaxis.units})")
        plt.legend()
        return ax
    def __str__(self):
        return str(self.psf_map)
| 33.058091 | 104 | 0.603301 |
ea1549133040d5e13f5cb0c23bfcbef7800e1b5c | 2,269 | py | Python | tests/test_command.py | evgeni/voltverine | d408cd7e515adeab605253eb9f56e46984fce1ab | [
"MIT"
] | null | null | null | tests/test_command.py | evgeni/voltverine | d408cd7e515adeab605253eb9f56e46984fce1ab | [
"MIT"
] | null | null | null | tests/test_command.py | evgeni/voltverine | d408cd7e515adeab605253eb9f56e46984fce1ab | [
"MIT"
] | null | null | null | import unittest
import sys
import os
try:
from unittest import mock
except:
import mock
import voltverine.plugins
class TestCommand(unittest.TestCase):
def test_no_command_provided(self):
voltverine_plugin = voltverine.plugins.Command()
(action, info) = voltverine_plugin.analyze()
self.assertTrue(action == voltverine.plugins.DUNNO)
self.assertTrue(info == {})
def test_bad_command_provided(self):
with mock.patch('subprocess.call', side_effect=OSError):
voltverine_plugin = voltverine.plugins.Command(command='/bin/nothing')
(action, info) = voltverine_plugin.analyze()
self.assertTrue(action == voltverine.plugins.DUNNO)
self.assertTrue(info == {})
def test_command_ok(self):
with mock.patch('subprocess.call', return_value=0):
voltverine_plugin = voltverine.plugins.Command('/bin/true')
(action, info) = voltverine_plugin.analyze()
self.assertTrue(action == voltverine.plugins.OK)
self.assertTrue(info == {'retcode': 0})
def test_command_notok(self):
with mock.patch('subprocess.call', return_value=1):
voltverine_plugin = voltverine.plugins.Command('/bin/false')
(action, info) = voltverine_plugin.analyze()
self.assertTrue(action == voltverine.plugins.NOT_OK)
self.assertTrue(info == {'retcode': 1})
def test_command_ok_list(self):
with mock.patch('subprocess.call', return_value=0):
voltverine_plugin = voltverine.plugins.Command(['/bin/true'])
(action, info) = voltverine_plugin.analyze()
self.assertTrue(action == voltverine.plugins.OK)
self.assertTrue(info == {'retcode': 0})
def test_command_notok_li(self):
with mock.patch('subprocess.call', return_value=1):
voltverine_plugin = voltverine.plugins.Command(['/bin/false'])
(action, info) = voltverine_plugin.analyze()
self.assertTrue(action == voltverine.plugins.NOT_OK)
self.assertTrue(info == {'retcode': 1})
if __name__ == '__main__':
# avoid writing to stderr
unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout, verbosity=2))
| 39.12069 | 85 | 0.656236 |
64703a2472832d56f3f77114ce9c149250f3ce13 | 6,343 | py | Python | toyrobot.py | vanwarantion/toyrobot | ebe883a6762dacf786a5cb05356e159073ba70da | [
"MIT"
] | null | null | null | toyrobot.py | vanwarantion/toyrobot | ebe883a6762dacf786a5cb05356e159073ba70da | [
"MIT"
] | null | null | null | toyrobot.py | vanwarantion/toyrobot | ebe883a6762dacf786a5cb05356e159073ba70da | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Settings:
ROBOT_DIRECTION_NAMES = [
'NORTH',
'SOUTH',
'EAST',
'WEST',
]
TABLE_DIMENSIONS = (5, 5)
import sys # For args and reading from stdin
class Robot():
    """
    Toy Robot Object
    """
    # Class-level defaults; place() rebinds these per instance.
    # NOTE(review): current_xy is a mutable class attribute shared by all
    # instances until place() rebinds it — verify no in-place mutation.
    current_xy = [None, None]
    current_f = None
    _placed = False
    def face_txt(self):
        """
        :return: String name of current direction
        """
        # NOTE(review): raises TypeError when the robot is unplaced
        # (current_f is None); report() guards this via _placed.
        return ROBOT_DIRECTION_NAMES[self.current_f]
    def report(self):
        """
        :return: String of current location and direction. Or NOT PLACED if robot has not yet placed successfully
        """
        # NOTE(review): the docstring mentions a "NOT PLACED" result, but the
        # code prints nothing when the robot is unplaced — doc/code mismatch.
        if self._placed:
            print "OUTPUT: %d, %d, %s" % (self.current_xy[0], self.current_xy[1], self.face_txt())
def valid_location(self, xy):
"""
Returns True if provided location is valid.
:param xy: X, Y coordinates
:return: True if location is within the table dimensions
"""
for _ in [0,1]:
if xy[_] > TABLE_DIMENSIONS[_] - 1 or xy[_] < 0:
return False
return True
def place(self, x, y, f):
"""
Place the robot at given coordinates and cartesian direction.
:param x: X value of the coordinates
:param y: Y value of the coordinates
:param f: Direction name (NORTH, SOUTH, EAST, WEST)
"""
try:
X, Y, F = int(x), int(y), ROBOT_DIRECTION_NAMES.index(f.upper())
except:
print "COULD NOT PLACE:", x, y, f
return None
if self.valid_location([X, Y]):
# Set location and direction
self.current_xy, self.current_f = [X, Y], F
# Mark robot as placed
self._placed = True
def move(self):
"""
Move the robot forward
"""
if not self._placed:
return None
# Movement values per each direction
cardinal_directions = [
[0, 1], # North
[0, -1], # South
[1, 0], # East
[-1, 0], # West
]
# Select direction and set target position
movement = cardinal_directions[self.current_f]
target_pos = [self.current_xy[0], self.current_xy[1]]
# Apply movement based on movement directions
for _ in [0,1]:
target_pos[_] += movement[_]
# Set current_xy if target position is valid
if self.valid_location(target_pos):
self.current_xy = target_pos
def rotate(self, amount):
"""
Rotates the robot (amount * 90) degrees clockwise.
Resulting directions for each initial direction are below;
Initial D | Left | Right | Left Index | Right Index
-----------|-------|-------|------------|-------------
North | West | East | 3 | 2
South | East | West | 2 | 3
East | North | South | 0 | 1
West | South | North | 1 | 0
:param amount: Amount of 90 degree turns to be applied
"""
# Abort if not placed
if not self._placed:
return None
# Right turn indexes
rotational_directions = [2,3,1,0]
# Apply rotation
for i in range(amount):
self.current_f = rotational_directions[self.current_f]
def set_cmd(self, cmd):
"""
Process command. Print error message if necessary
:param cmd: Text input
"""
# Available Commands
commands = {
'place': {'action': self.place, 'req_args_len': 3},
'move': {'action': self.move},
'left': {'action': self.rotate, 'arg': 3},
'right': {'action': self.rotate, 'arg': 1},
'report': {'action': self.report},
}
# Check if command is valid
my_cmd = cmd.split(" ", 1)
command_key = my_cmd[0].lower()
if command_key not in commands.keys():
print "Invalid command:", command_key
return None
# Find required parameters
action_params = commands[command_key]
if 'req_args_len' in action_params.keys():
# Multiple arguments are required
if len(my_cmd) == 1:
# No arguments given
print "Arguments required:", command_key
return None
my_args = my_cmd[1].split(',')
if len(my_args) < action_params['req_args_len']:
print "Insufficient arguments:", command_key
return None
# Create list of args and call target function
action_params['action'](*[_.strip() for _ in my_args])
elif 'arg' in action_params.keys():
# Argument is provided in action_params
action_params['action'](action_params['arg'])
else:
# Command does not require any arguments
action_params['action']()
if __name__ == '__main__':
    r = Robot()
    # Redneck testing: `--test` runs a canned command sequence and exits.
    if len(sys.argv) > 1:
        if sys.argv[1] == '--test':
            print "No input. Just running some commands"
            cmds = [
                'SELF DESTRUCT', 'LEFT', 'MOVE', 'REPORT', 'PLACE 0,0, NORTH', 'LEFT', 'MOVE', 'REPORT', 'PLACE 2,3, SOUTH',
                'MOVE', 'REPORT', 'RIGHT', 'MOVE', 'MOVE', 'REPORT', 'RIGHT', 'RIGHT', 'MOVE', 'MOVE', 'MOVE', 'MOVE',
                'REPORT',
            ]
            for _ in cmds:
                print "Running:", _
                r.set_cmd(_)
            sys.exit()
    # Check if we are working with a terminal (interactive) or piped input
    if sys.stdin.isatty():
        print "Toy Robot Sim. Welcome!\n"
        # Interactive loop: read user commands until Ctrl-C
        while True:
            try:
                user_cmd = raw_input("Toy Robot> ")
                r.set_cmd(user_cmd)
            except KeyboardInterrupt:
                print "\nBye!"
                sys.exit()
    else:
        # Non-interactive: read lines from stdin & run commands
        for line in sys.stdin:
            cmd_to_run = line.strip()
            # Ignore empty lines
            if cmd_to_run == '':
                continue
            print "Command:", cmd_to_run
            r.set_cmd(cmd_to_run)
7110e2d74d013b278281978bbe8edff63c09eed9 | 3,632 | py | Python | shell_util.py | didicout/python_util | c463ae80b1a68115015985b26a33e33e7a4c6b11 | [
"MIT"
] | null | null | null | shell_util.py | didicout/python_util | c463ae80b1a68115015985b26a33e33e7a4c6b11 | [
"MIT"
] | null | null | null | shell_util.py | didicout/python_util | c463ae80b1a68115015985b26a33e33e7a4c6b11 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
shell util
"""
from __future__ import print_function
import subprocess
import sys
class Color:
    # ANSI terminal escape sequences used to colorize trace output.
    # ENDC resets all attributes and must terminate any colored span.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def run_bash_script(script,
                    print_trace=False,
                    return_stdout=False,
                    return_stderr=False,
                    combine_stderr=False):
    """
    Run a bash script in a subprocess.

    :param script: bash script string
    :param print_trace: whether print script and result
    :param return_stdout: catch stdout as string and return instead of printing to sys.stdout
    :param return_stderr: catch stderr as string and return instead of printing to sys.stderr
    :param combine_stderr: redirect stderr to stdout
    :type script: str
    :type print_trace: bool
    :type return_stdout: bool
    :type return_stderr: bool
    :type combine_stderr: bool
    :return: ret_value, stdout, stderr
    :rtype: tuple[int, str, str]
    """
    if print_trace:
        print(Color.WARNING + script + Color.ENDC)
    # Decide where the child's streams go: a PIPE when the caller wants the
    # text back, otherwise pass through to our own streams.
    if return_stdout:
        target_stdout = subprocess.PIPE
    else:
        target_stdout = sys.stdout
    if combine_stderr:
        target_stderr = subprocess.STDOUT
    elif return_stderr:
        target_stderr = subprocess.PIPE
    else:
        target_stderr = sys.stderr
    p = subprocess.Popen("/bin/bash", stdout=target_stdout, stderr=target_stderr,
                         stdin=subprocess.PIPE)
    # Feed the script through communicate() rather than a bare
    # p.stdin.write(script): writing a large script into the stdin pipe
    # before draining stdout/stderr can deadlock once the OS pipe buffers
    # fill up; communicate() pumps all three streams concurrently.
    tmp_stdout, tmp_stderr = p.communicate(script)
    ret_value = p.returncode
    ret_stdout = tmp_stdout if return_stdout else None
    # With combine_stderr, stderr was merged into stdout, so there is no
    # separate stderr text to hand back.
    ret_stderr = tmp_stderr if (return_stderr and not combine_stderr) else None
    if print_trace:
        if ret_value == 0:
            print(Color.OKGREEN + str(ret_value) + Color.ENDC)
        else:
            print(Color.FAIL + str(ret_value) + Color.ENDC)
        if return_stdout:
            print(ret_stdout)
        if return_stderr and not combine_stderr:
            print(ret_stderr)
    return ret_value, ret_stdout, ret_stderr
# Self-test: guarded so that importing shell_util as a library no longer
# executes a battery of subprocess calls as an import side effect.
if __name__ == '__main__':
    print("================= 1 =================")
    print(run_bash_script("echo 123"))
    print("================= 2 =================")
    print(run_bash_script("echo 123 && cp", return_stderr=True, return_stdout=True, print_trace=True))
    print("================= 3 =================")
    print(run_bash_script("echo 123 && cp", return_stdout=True, return_stderr=True, combine_stderr=True, print_trace=True))
    print("================= 4 =================")
    print(run_bash_script("echo 123 && cp", return_stdout=True, return_stderr=False, combine_stderr=True, print_trace=True))
    print("================= 5 =================")
    print(run_bash_script("echo 123 && cp", return_stdout=True, return_stderr=False, print_trace=True))
    print("================= 6 =================")
    print(run_bash_script("echo 123 && cp", return_stdout=False, return_stderr=True, combine_stderr=True, print_trace=True))
    print("================= 7 =================")
    print(run_bash_script("echo 123 && cp", return_stdout=False, return_stderr=True, combine_stderr=False, print_trace=True))
    print("================= 8 =================")
    print(run_bash_script("echo 123 && cp", return_stdout=False, return_stderr=False, combine_stderr=True, print_trace=True))
    print("================= 9 =================")
    print(run_bash_script("echo 123 && cp", return_stdout=False, return_stderr=False, combine_stderr=False, print_trace=True))
ee8c26c3ca36bffb29e28e6a409f5bac42a6dad4 | 56,495 | py | Python | tests/modeladmin/tests.py | cpsimpson/django | d60b96d98881b47c845125e82269ea6a9b268fbb | [
"BSD-3-Clause"
] | null | null | null | tests/modeladmin/tests.py | cpsimpson/django | d60b96d98881b47c845125e82269ea6a9b268fbb | [
"BSD-3-Clause"
] | null | null | null | tests/modeladmin/tests.py | cpsimpson/django | d60b96d98881b47c845125e82269ea6a9b268fbb | [
"BSD-3-Clause"
] | 1 | 2020-05-25T08:55:19.000Z | 2020-05-25T08:55:19.000Z | from __future__ import unicode_literals
from datetime import date
from django import forms
from django.contrib.admin.options import (ModelAdmin, TabularInline,
HORIZONTAL, VERTICAL)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.contrib.admin.validation import ModelAdminValidator
from django.contrib.admin import (SimpleListFilter,
BooleanFieldListFilter)
from django.core.checks import Error
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import TestCase, ignore_warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from .models import Band, Concert, ValidationTestModel, ValidationTestInlineModel
class MockRequest(object):
    # Minimal stand-in for an HttpRequest; the admin code under test only
    # needs an object with a `user` attribute (attached at module level below).
    pass
class MockSuperUser(object):
    """Stand-in user that behaves like a superuser: every permission check passes."""

    def has_perm(self, perm):
        # Grant unconditionally, whatever permission string is asked for.
        return True
# Shared module-level fixture: a bare request whose user passes every
# permission check, reused by the ModelAdmin tests below.
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
    """
    Behavioral tests for ModelAdmin/InlineModelAdmin form construction:
    fields/fieldsets/exclude interaction, custom ModelForms, widget choice
    for ForeignKey and choice fields, and radio_fields handling.
    """
    def setUp(self):
        # One Band instance shared by the tests; self.site is a fresh AdminSite.
        self.band = Band.objects.create(
            name='The Doors',
            bio='',
            sign_date=date(1965, 1, 1),
        )
        self.site = AdminSite()
    # form/fields/fieldsets interaction ##############################
    def test_default_fields(self):
        ma = ModelAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields),
            ['name', 'bio', 'sign_date'])
        self.assertEqual(list(ma.get_fields(request)),
            ['name', 'bio', 'sign_date'])
        self.assertEqual(list(ma.get_fields(request, self.band)),
            ['name', 'bio', 'sign_date'])
    def test_default_fieldsets(self):
        # fieldsets_add and fieldsets_change should return a special data structure that
        # is used in the templates. They should generate the "right thing" whether we
        # have specified a custom form, the fields argument, or nothing at all.
        #
        # Here's the default case. There are no custom form_add/form_change methods,
        # no fields argument, and no fieldsets argument.
        ma = ModelAdmin(Band, self.site)
        self.assertEqual(ma.get_fieldsets(request),
            [(None, {'fields': ['name', 'bio', 'sign_date']})])
        self.assertEqual(ma.get_fieldsets(request, self.band),
            [(None, {'fields': ['name', 'bio', 'sign_date']})])
    def test_get_fieldsets(self):
        # Test that get_fieldsets is called when figuring out form fields.
        # Refs #18681.
        class BandAdmin(ModelAdmin):
            def get_fieldsets(self, request, obj=None):
                return [(None, {'fields': ['name', 'bio']})]
        ma = BandAdmin(Band, self.site)
        form = ma.get_form(None)
        self.assertEqual(form._meta.fields, ['name', 'bio'])
        class InlineBandAdmin(TabularInline):
            model = Concert
            fk_name = 'main_band'
            can_delete = False
            def get_fieldsets(self, request, obj=None):
                return [(None, {'fields': ['day', 'transport']})]
        ma = InlineBandAdmin(Band, self.site)
        form = ma.get_formset(None).form
        self.assertEqual(form._meta.fields, ['day', 'transport'])
    def test_lookup_allowed_allows_nonexistent_lookup(self):
        """
        Ensure that a lookup_allowed allows a parameter
        whose field lookup doesn't exist.
        Refs #21129.
        """
        class BandAdmin(ModelAdmin):
            fields = ['name']
        ma = BandAdmin(Band, self.site)
        self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
    def test_field_arguments(self):
        # If we specify the fields argument, fieldsets_add and fielsets_change should
        # just stick the fields into a formsets structure and return it.
        class BandAdmin(ModelAdmin):
            fields = ['name']
        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_fields(request)), ['name'])
        self.assertEqual(list(ma.get_fields(request, self.band)), ['name'])
        self.assertEqual(ma.get_fieldsets(request),
            [(None, {'fields': ['name']})])
        self.assertEqual(ma.get_fieldsets(request, self.band),
            [(None, {'fields': ['name']})])
    def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, it should exclude fields on the Form class
        # to the fields specified. This may cause errors to be raised in the db layer if
        # required model fields aren't in fields/fieldsets, but that's preferable to
        # ghost errors where you have a field in your Form class that isn't being
        # displayed because you forgot to add it to fields/fieldsets
        # Using `fields`.
        class BandAdmin(ModelAdmin):
            fields = ['name']
        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
        self.assertEqual(list(ma.get_form(request, self.band).base_fields),
            ['name'])
        # Using `fieldsets`.
        class BandAdmin(ModelAdmin):
            fieldsets = [(None, {'fields': ['name']})]
        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
        self.assertEqual(list(ma.get_form(request, self.band).base_fields),
            ['name'])
        # Using `exclude`.
        class BandAdmin(ModelAdmin):
            exclude = ['bio']
        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields),
            ['name', 'sign_date'])
        # You can also pass a tuple to `exclude`.
        class BandAdmin(ModelAdmin):
            exclude = ('bio',)
        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields),
            ['name', 'sign_date'])
        # Using `fields` and `exclude`.
        class BandAdmin(ModelAdmin):
            fields = ['name', 'bio']
            exclude = ['bio']
        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields),
            ['name'])
    def test_custom_form_meta_exclude_with_readonly(self):
        """
        Ensure that the custom ModelForm's `Meta.exclude` is respected when
        used in conjunction with `ModelAdmin.readonly_fields` and when no
        `ModelAdmin.exclude` is defined.
        Refs #14496.
        """
        # First, with `ModelAdmin` -----------------------
        class AdminBandForm(forms.ModelForm):
            class Meta:
                model = Band
                exclude = ['bio']
        class BandAdmin(ModelAdmin):
            readonly_fields = ['name']
            form = AdminBandForm
        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields),
            ['sign_date'])
        # Then, with `InlineModelAdmin`  -----------------
        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                exclude = ['day']
        class ConcertInline(TabularInline):
            readonly_fields = ['transport']
            form = AdminConcertForm
            fk_name = 'main_band'
            model = Concert
        class BandAdmin(ModelAdmin):
            inlines = [
                ConcertInline
            ]
        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['main_band', 'opening_band', 'id', 'DELETE'])
    def test_custom_form_meta_exclude(self):
        """
        Ensure that the custom ModelForm's `Meta.exclude` is overridden if
        `ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined.
        Refs #14496.
        """
        # First, with `ModelAdmin` -----------------------
        class AdminBandForm(forms.ModelForm):
            class Meta:
                model = Band
                exclude = ['bio']
        class BandAdmin(ModelAdmin):
            exclude = ['name']
            form = AdminBandForm
        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields),
            ['bio', 'sign_date'])
        # Then, with `InlineModelAdmin`  -----------------
        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                exclude = ['day']
        class ConcertInline(TabularInline):
            exclude = ['transport']
            form = AdminConcertForm
            fk_name = 'main_band'
            model = Concert
        class BandAdmin(ModelAdmin):
            inlines = [
                ConcertInline
            ]
        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['main_band', 'opening_band', 'day', 'id', 'DELETE'])
    def test_custom_form_validation(self):
        # If we specify a form, it should use it allowing custom validation to work
        # properly. This won't, however, break any of the admin widgets or media.
        class AdminBandForm(forms.ModelForm):
            delete = forms.BooleanField()
        class BandAdmin(ModelAdmin):
            form = AdminBandForm
        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields),
            ['name', 'bio', 'sign_date', 'delete'])
        self.assertEqual(
            type(ma.get_form(request).base_fields['sign_date'].widget),
            AdminDateWidget)
    def test_form_exclude_kwarg_override(self):
        """
        Ensure that the `exclude` kwarg passed to `ModelAdmin.get_form()`
        overrides all other declarations. Refs #8999.
        """
        class AdminBandForm(forms.ModelForm):
            class Meta:
                model = Band
                exclude = ['name']
        class BandAdmin(ModelAdmin):
            exclude = ['sign_date']
            form = AdminBandForm
            def get_form(self, request, obj=None, **kwargs):
                kwargs['exclude'] = ['bio']
                return super(BandAdmin, self).get_form(request, obj, **kwargs)
        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields),
            ['name', 'sign_date'])
    def test_formset_exclude_kwarg_override(self):
        """
        Ensure that the `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
        overrides all other declarations. Refs #8999.
        """
        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                exclude = ['day']
        class ConcertInline(TabularInline):
            exclude = ['transport']
            form = AdminConcertForm
            fk_name = 'main_band'
            model = Concert
            def get_formset(self, request, obj=None, **kwargs):
                kwargs['exclude'] = ['opening_band']
                return super(ConcertInline, self).get_formset(request, obj, **kwargs)
        class BandAdmin(ModelAdmin):
            inlines = [
                ConcertInline
            ]
        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['main_band', 'day', 'transport', 'id', 'DELETE'])
    def test_queryset_override(self):
        # If we need to override the queryset of a ModelChoiceField in our custom form
        # make sure that RelatedFieldWidgetWrapper doesn't mess that up.
        band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
        band2.save()
        class ConcertAdmin(ModelAdmin):
            pass
        ma = ConcertAdmin(Concert, self.site)
        form = ma.get_form(request)()
        self.assertHTMLEqual(str(form["main_band"]),
            '<div class="related-widget-wrapper">'
            '<select name="main_band" id="id_main_band">'
            '<option value="" selected="selected">---------</option>'
            '<option value="%d">The Beatles</option>'
            '<option value="%d">The Doors</option>'
            '</select></div>' % (band2.id, self.band.id))
        class AdminConcertForm(forms.ModelForm):
            def __init__(self, *args, **kwargs):
                super(AdminConcertForm, self).__init__(*args, **kwargs)
                self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
        class ConcertAdminWithForm(ModelAdmin):
            form = AdminConcertForm
        ma = ConcertAdminWithForm(Concert, self.site)
        form = ma.get_form(request)()
        self.assertHTMLEqual(str(form["main_band"]),
            '<div class="related-widget-wrapper">'
            '<select name="main_band" id="id_main_band">'
            '<option value="" selected="selected">---------</option>'
            '<option value="%d">The Doors</option>'
            '</select></div>' % self.band.id)
    def test_regression_for_ticket_15820(self):
        """
        Ensure that `obj` is passed from `InlineModelAdmin.get_fieldsets()` to
        `InlineModelAdmin.get_formset()`.
        """
        class CustomConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                fields = ['day']
        class ConcertInline(TabularInline):
            model = Concert
            fk_name = 'main_band'
            def get_formset(self, request, obj=None, **kwargs):
                if obj:
                    kwargs['form'] = CustomConcertForm
                return super(ConcertInline, self).get_formset(request, obj, **kwargs)
        class BandAdmin(ModelAdmin):
            inlines = [
                ConcertInline
            ]
        Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
        ma = BandAdmin(Band, self.site)
        inline_instances = ma.get_inline_instances(request)
        fieldsets = list(inline_instances[0].get_fieldsets(request))
        self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
        fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
        self.assertEqual(fieldsets[0][1]['fields'], ['day'])
    # radio_fields behavior ###########################################
    def test_default_foreign_key_widget(self):
        # First, without any radio_fields specified, the widgets for ForeignKey
        # and fields with choices specified ought to be a basic Select widget.
        # ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
        # they need to be handled properly when type checking. For Select fields, all of
        # the choices lists have a first entry of dashes.
        cma = ModelAdmin(Concert, self.site)
        cmafa = cma.get_form(request)
        self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
            Select)
        self.assertEqual(
            list(cmafa.base_fields['main_band'].widget.choices),
            [('', '---------'), (self.band.id, 'The Doors')])
        self.assertEqual(
            type(cmafa.base_fields['opening_band'].widget.widget), Select)
        self.assertEqual(
            list(cmafa.base_fields['opening_band'].widget.choices),
            [('', '---------'), (self.band.id, 'The Doors')])
        self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
        self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
            [('', '---------'), (1, 'Fri'), (2, 'Sat')])
        self.assertEqual(type(cmafa.base_fields['transport'].widget),
            Select)
        self.assertEqual(
            list(cmafa.base_fields['transport'].widget.choices),
            [('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
    def test_foreign_key_as_radio_field(self):
        # Now specify all the fields as radio_fields.  Widgets should now be
        # RadioSelect, and the choices list should have a first entry of 'None' if
        # blank=True for the model field.  Finally, the widget should have the
        # 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
        class ConcertAdmin(ModelAdmin):
            radio_fields = {
                'main_band': HORIZONTAL,
                'opening_band': VERTICAL,
                'day': VERTICAL,
                'transport': HORIZONTAL,
            }
        cma = ConcertAdmin(Concert, self.site)
        cmafa = cma.get_form(request)
        self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
            AdminRadioSelect)
        self.assertEqual(cmafa.base_fields['main_band'].widget.attrs,
            {'class': 'radiolist inline'})
        self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices),
            [(self.band.id, 'The Doors')])
        self.assertEqual(
            type(cmafa.base_fields['opening_band'].widget.widget),
            AdminRadioSelect)
        self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs,
            {'class': 'radiolist'})
        self.assertEqual(
            list(cmafa.base_fields['opening_band'].widget.choices),
            [('', 'None'), (self.band.id, 'The Doors')])
        self.assertEqual(type(cmafa.base_fields['day'].widget),
            AdminRadioSelect)
        self.assertEqual(cmafa.base_fields['day'].widget.attrs,
            {'class': 'radiolist'})
        self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
            [(1, 'Fri'), (2, 'Sat')])
        self.assertEqual(type(cmafa.base_fields['transport'].widget),
            AdminRadioSelect)
        self.assertEqual(cmafa.base_fields['transport'].widget.attrs,
            {'class': 'radiolist inline'})
        self.assertEqual(list(cmafa.base_fields['transport'].widget.choices),
            [('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                exclude = ('transport',)
        class ConcertAdmin(ModelAdmin):
            form = AdminConcertForm
        ma = ConcertAdmin(Concert, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields),
            ['main_band', 'opening_band', 'day'])
        class AdminConcertForm(forms.ModelForm):
            extra = forms.CharField()
            class Meta:
                model = Concert
                fields = ['extra', 'transport']
        class ConcertAdmin(ModelAdmin):
            form = AdminConcertForm
        ma = ConcertAdmin(Concert, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields),
            ['extra', 'transport'])
        class ConcertInline(TabularInline):
            form = AdminConcertForm
            model = Concert
            fk_name = 'main_band'
            can_delete = True
        class BandAdmin(ModelAdmin):
            inlines = [
                ConcertInline
            ]
        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['extra', 'transport', 'id', 'DELETE', 'main_band'])
class CheckTestCase(TestCase):
    """
    Base class for admin system-check tests: asserts that running
    `model_admin.check(model=...)` yields exactly the expected Error
    (or no errors at all).
    """
    def assertIsInvalid(self, model_admin, model, msg,
            id=None, hint=None, invalid_obj=None):
        # The reported-on object defaults to the admin class itself
        # (inlines report on the inline class instead).
        invalid_obj = invalid_obj or model_admin
        errors = model_admin.check(model=model)
        expected = [
            Error(
                msg,
                hint=hint,
                obj=invalid_obj,
                id=id,
            )
        ]
        self.assertEqual(errors, expected)
    def assertIsInvalidRegexp(self, model_admin, model, msg,
            id=None, hint=None, invalid_obj=None):
        """
        Same as assertIsInvalid but treats the given msg as a regexp.
        """
        invalid_obj = invalid_obj or model_admin
        errors = model_admin.check(model=model)
        self.assertEqual(len(errors), 1)
        error = errors[0]
        self.assertEqual(error.hint, hint)
        self.assertEqual(error.obj, invalid_obj)
        self.assertEqual(error.id, id)
        # six.assertRegex bridges the py2/py3 assertRegexp* rename.
        six.assertRegex(self, error.msg, msg)
    def assertIsValid(self, model_admin, model):
        # A valid configuration must produce an empty error list.
        errors = model_admin.check(model=model)
        expected = []
        self.assertEqual(errors, expected)
class RawIdCheckTests(CheckTestCase):
    """System checks for ModelAdmin.raw_id_fields (admin.E001-E003)."""
    def test_not_iterable(self):
        class ValidationTestModelAdmin(ModelAdmin):
            raw_id_fields = 10
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'raw_id_fields' must be a list or tuple.",
            'admin.E001')
    def test_missing_field(self):
        class ValidationTestModelAdmin(ModelAdmin):
            raw_id_fields = ('non_existent_field',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'raw_id_fields[0]' refers to 'non_existent_field', "
             "which is not an attribute of 'modeladmin.ValidationTestModel'."),
            'admin.E002')
    def test_invalid_field_type(self):
        # raw_id_fields only makes sense for relational fields.
        class ValidationTestModelAdmin(ModelAdmin):
            raw_id_fields = ('name',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'raw_id_fields[0]' must be a ForeignKey or ManyToManyField.",
            'admin.E003')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            raw_id_fields = ('users',)
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FieldsetsCheckTests(CheckTestCase):
    """System checks for ModelAdmin.fieldsets structure (admin.E005, E007-E012)."""
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = (("General", {'fields': ('name',)}),)
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
    def test_not_iterable(self):
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = 10
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'fieldsets' must be a list or tuple.",
            'admin.E007')
    def test_non_iterable_item(self):
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = ({},)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'fieldsets[0]' must be a list or tuple.",
            'admin.E008')
    def test_item_not_a_pair(self):
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = ((),)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'fieldsets[0]' must be of length 2.",
            'admin.E009')
    def test_second_element_of_item_not_a_dict(self):
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = (("General", ()),)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'fieldsets[0][1]' must be a dictionary.",
            'admin.E010')
    def test_missing_fields_key(self):
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = (("General", {}),)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'fieldsets[0][1]' must contain the key 'fields'.",
            'admin.E011')
        # NOTE(review): the second admin below re-verifies the fixed/valid
        # variant within the same test method.
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = (("General", {'fields': ('name',)}),)
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
    def test_specified_both_fields_and_fieldsets(self):
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = (("General", {'fields': ('name',)}),)
            fields = ['name']
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "Both 'fieldsets' and 'fields' are specified.",
            'admin.E005')
    def test_duplicate_fields(self):
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = [(None, {'fields': ['name', 'name']})]
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "There are duplicate field(s) in 'fieldsets[0][1]'.",
            'admin.E012')
    def test_fieldsets_with_custom_form_validation(self):
        class BandAdmin(ModelAdmin):
            fieldsets = (
                ('Band', {
                    'fields': ('name',)
                }),
            )
        self.assertIsValid(BandAdmin, Band)
class FieldsCheckTests(CheckTestCase):
    """System checks for the `fields` option (admin.E004, E006), incl. inlines."""
    def test_duplicate_fields_in_fields(self):
        class ValidationTestModelAdmin(ModelAdmin):
            fields = ['name', 'name']
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'fields' contains duplicate field(s).",
            'admin.E006')
    def test_inline(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            fields = 10
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        # The error is attributed to the inline class, not the parent admin.
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'fields' must be a list or tuple.",
            'admin.E004',
            invalid_obj=ValidationTestInline)
class FormCheckTests(CheckTestCase):
    """System checks for the `form` option (admin.E016) and form/fieldsets combos."""
    def test_invalid_type(self):
        class FakeForm(object):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            form = FakeForm
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'form' must inherit from 'BaseModelForm'.",
            'admin.E016')
    def test_fieldsets_with_custom_form_validation(self):
        class BandAdmin(ModelAdmin):
            fieldsets = (
                ('Band', {
                    'fields': ('name',)
                }),
            )
        self.assertIsValid(BandAdmin, Band)
    def test_valid_case(self):
        # Extra fields declared on the custom form may appear in fieldsets.
        class AdminBandForm(forms.ModelForm):
            delete = forms.BooleanField()
        class BandAdmin(ModelAdmin):
            form = AdminBandForm
            fieldsets = (
                ('Band', {
                    'fields': ('name', 'bio', 'sign_date', 'delete')
                }),
            )
        self.assertIsValid(BandAdmin, Band)
class FilterVerticalCheckTests(CheckTestCase):
    """System checks for filter_vertical (admin.E017, E019, E020)."""
    def test_not_iterable(self):
        class ValidationTestModelAdmin(ModelAdmin):
            filter_vertical = 10
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'filter_vertical' must be a list or tuple.",
            'admin.E017')
    def test_missing_field(self):
        class ValidationTestModelAdmin(ModelAdmin):
            filter_vertical = ('non_existent_field',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'filter_vertical[0]' refers to 'non_existent_field', "
             "which is not an attribute of 'modeladmin.ValidationTestModel'."),
            'admin.E019')
    def test_invalid_field_type(self):
        # filter_vertical only works on ManyToManyFields.
        class ValidationTestModelAdmin(ModelAdmin):
            filter_vertical = ('name',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'filter_vertical[0]' must be a ManyToManyField.",
            'admin.E020')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            filter_vertical = ("users",)
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FilterHorizontalCheckTests(CheckTestCase):
    """System checks for filter_horizontal (admin.E018, E019, E020)."""
    def test_not_iterable(self):
        class ValidationTestModelAdmin(ModelAdmin):
            filter_horizontal = 10
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'filter_horizontal' must be a list or tuple.",
            'admin.E018')
    def test_missing_field(self):
        class ValidationTestModelAdmin(ModelAdmin):
            filter_horizontal = ('non_existent_field',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'filter_horizontal[0]' refers to 'non_existent_field', "
             "which is not an attribute of 'modeladmin.ValidationTestModel'."),
            'admin.E019')
    def test_invalid_field_type(self):
        # filter_horizontal only works on ManyToManyFields.
        class ValidationTestModelAdmin(ModelAdmin):
            filter_horizontal = ('name',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'filter_horizontal[0]' must be a ManyToManyField.",
            'admin.E020')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            filter_horizontal = ("users",)
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class RadioFieldsCheckTests(CheckTestCase):
    """System checks for radio_fields (admin.E021-E024)."""
    def test_not_dictionary(self):
        class ValidationTestModelAdmin(ModelAdmin):
            radio_fields = ()
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'radio_fields' must be a dictionary.",
            'admin.E021')
    def test_missing_field(self):
        class ValidationTestModelAdmin(ModelAdmin):
            radio_fields = {'non_existent_field': VERTICAL}
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'radio_fields' refers to 'non_existent_field', "
             "which is not an attribute of 'modeladmin.ValidationTestModel'."),
            'admin.E022')
    def test_invalid_field_type(self):
        # radio_fields needs a ForeignKey or a field with choices.
        class ValidationTestModelAdmin(ModelAdmin):
            radio_fields = {'name': VERTICAL}
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'radio_fields' refers to 'name', which is not an instance "
             "of ForeignKey, and does not have a 'choices' definition."),
            'admin.E023')
    def test_invalid_value(self):
        # The dict values must be the HORIZONTAL/VERTICAL constants.
        class ValidationTestModelAdmin(ModelAdmin):
            radio_fields = {"state": None}
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or admin.VERTICAL.",
            'admin.E024')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            radio_fields = {"state": VERTICAL}
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class PrepopulatedFieldsCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.prepopulated_fields`` (admin.E026-E030)."""
    def test_not_dictionary(self):
        class ValidationTestModelAdmin(ModelAdmin):
            prepopulated_fields = ()
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'prepopulated_fields' must be a dictionary.",
            'admin.E026')
    def test_missing_field(self):
        # Unknown *target* field (the dict key) raises admin.E027.
        class ValidationTestModelAdmin(ModelAdmin):
            prepopulated_fields = {'non_existent_field': ("slug",)}
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'prepopulated_fields' refers to 'non_existent_field', "
             "which is not an attribute of 'modeladmin.ValidationTestModel'."),
            'admin.E027')
    def test_missing_field_again(self):
        # Unknown *source* field (inside the value tuple) raises admin.E030.
        class ValidationTestModelAdmin(ModelAdmin):
            prepopulated_fields = {"slug": ('non_existent_field',)}
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'prepopulated_fields[\"slug\"][0]' refers to 'non_existent_field', "
             "which is not an attribute of 'modeladmin.ValidationTestModel'."),
            'admin.E030')
    def test_invalid_field_type(self):
        # DateTimeField / ForeignKey / ManyToManyField cannot be prepopulated.
        class ValidationTestModelAdmin(ModelAdmin):
            prepopulated_fields = {"users": ('name',)}
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'prepopulated_fields' refers to 'users', which must not be "
             "a DateTimeField, ForeignKey or ManyToManyField."),
            'admin.E028')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            prepopulated_fields = {"slug": ('name',)}
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayTests(CheckTestCase):
    """System checks for ``ModelAdmin.list_display`` (admin.E107-E109)."""
    def test_not_iterable(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_display = 10
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_display' must be a list or tuple.",
            'admin.E107')
    def test_missing_field(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_display = ('non_existent_field',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'list_display[0]' refers to 'non_existent_field', which is not a callable, an attribute "
             "of 'ValidationTestModelAdmin', or an attribute or method on 'modeladmin.ValidationTestModel'."),
            'admin.E108')
    def test_invalid_field_type(self):
        # M2M fields cannot be rendered in a changelist column (admin.E109).
        class ValidationTestModelAdmin(ModelAdmin):
            list_display = ('users',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_display[0]' must not be a ManyToManyField.",
            'admin.E109')
    def test_valid_case(self):
        # list_display accepts model fields, admin methods, and plain callables.
        def a_callable(obj):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            def a_method(self, obj):
                pass
            list_display = ('name', 'decade_published_in', 'a_method', a_callable)
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayLinksCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.list_display_links`` (admin.E110-E111)."""
    def test_not_iterable(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_display_links = 10
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_display_links' must be a list, a tuple, or None.",
            'admin.E110')
    def test_missing_field(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_display_links = ('non_existent_field',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_display_links[0]' refers to 'non_existent_field', which is not defined in 'list_display'.",
            'admin.E111')
    def test_missing_in_list_display(self):
        # Every link must also appear in list_display; 'name' does not here.
        class ValidationTestModelAdmin(ModelAdmin):
            list_display_links = ('name',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_display_links[0]' refers to 'name', which is not defined in 'list_display'.",
            'admin.E111')
    def test_valid_case(self):
        def a_callable(obj):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            def a_method(self, obj):
                pass
            list_display = ('name', 'decade_published_in', 'a_method', a_callable)
            list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
    def test_None_is_valid_case(self):
        # None explicitly disables row links and is a valid setting.
        class ValidationTestModelAdmin(ModelAdmin):
            list_display_links = None
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListFilterTests(CheckTestCase):
    """System checks for ``ModelAdmin.list_filter`` (admin.E112-E116)."""
    def test_list_filter_validation(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = 10
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_filter' must be a list or tuple.",
            'admin.E112')
    def test_missing_field(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = ('non_existent_field',)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_filter[0]' refers to 'non_existent_field', which does not refer to a Field.",
            'admin.E116')
    def test_not_filter(self):
        # A bare class entry must inherit from ListFilter (admin.E113).
        class RandomClass(object):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = (RandomClass,)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_filter[0]' must inherit from 'ListFilter'.",
            'admin.E113')
    def test_not_filter_again(self):
        # In a (field, FilterClass) pair, the class must be a FieldListFilter.
        class RandomClass(object):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = (('is_active', RandomClass),)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
            'admin.E115')
    def test_not_filter_again_again(self):
        # Even a SimpleListFilter subclass is rejected in the pair position —
        # only FieldListFilter subclasses may be bound to a field name.
        class AwesomeFilter(SimpleListFilter):
            def get_title(self):
                return 'awesomeness'
            def get_choices(self, request):
                return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
            def get_queryset(self, cl, qs):
                return qs
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = (('is_active', AwesomeFilter),)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
            'admin.E115')
    def test_not_associated_with_field_name(self):
        # Conversely, a bare FieldListFilter (no field name) raises admin.E114.
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = (BooleanFieldListFilter,)
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_filter[0]' must not inherit from 'FieldListFilter'.",
            'admin.E114')
    def test_valid_case(self):
        # Mix of field name, SimpleListFilter, (field, FieldListFilter) pair,
        # and a related lookup string — all legal forms at once.
        class AwesomeFilter(SimpleListFilter):
            def get_title(self):
                return 'awesomeness'
            def get_choices(self, request):
                return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
            def get_queryset(self, cl, qs):
                return qs
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListPerPageCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.list_per_page`` (admin.E118)."""
    def test_not_integer(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_per_page = 'hello'
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_per_page' must be an integer.",
            'admin.E118')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_per_page = 100
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListMaxShowAllCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.list_max_show_all`` (admin.E119)."""
    def test_not_integer(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_max_show_all = 'hello'
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'list_max_show_all' must be an integer.",
            'admin.E119')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_max_show_all = 200
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SearchFieldsCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.search_fields`` (admin.E126)."""
    def test_not_iterable(self):
        class ValidationTestModelAdmin(ModelAdmin):
            search_fields = 10
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'search_fields' must be a list or tuple.",
            'admin.E126')
class DateHierarchyCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.date_hierarchy`` (admin.E127-E128)."""
    def test_missing_field(self):
        class ValidationTestModelAdmin(ModelAdmin):
            date_hierarchy = 'non_existent_field'
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'date_hierarchy' refers to 'non_existent_field', which "
             "is not an attribute of 'modeladmin.ValidationTestModel'."),
            'admin.E127')
    def test_invalid_field_type(self):
        # Only date-typed fields can drive the hierarchy navigation.
        class ValidationTestModelAdmin(ModelAdmin):
            date_hierarchy = 'name'
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'date_hierarchy' must be a DateField or DateTimeField.",
            'admin.E128')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            date_hierarchy = 'pub_date'
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class OrderingCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.ordering`` (admin.E031-E033)."""
    def test_not_iterable(self):
        # NOTE: this method performs two independent checks — a non-iterable
        # value (admin.E031) and an unknown field name (admin.E033). The
        # second class definition intentionally shadows the first.
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = 10
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'ordering' must be a list or tuple.",
            'admin.E031')
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = ('non_existent_field',)
        self.assertIsInvalid(
            ValidationTestModelAdmin,
            ValidationTestModel,
            "The value of 'ordering[0]' refers to 'non_existent_field', which is not an attribute of 'modeladmin.ValidationTestModel'.",
            'admin.E033',
        )
    def test_random_marker_not_alone(self):
        # '?' (random ordering) cannot be combined with other fields.
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = ('?', 'name')
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            ("The value of 'ordering' has the random ordering marker '?', but contains "
             "other fields as well."),
            'admin.E032',
            hint='Either remove the "?", or remove the other fields.')
    def test_valid_random_marker_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = ('?',)
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
    def test_valid_complex_case(self):
        # Related-field lookups (double-underscore paths) are accepted.
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = ('band__name',)
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = ('name',)
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListSelectRelatedCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.list_select_related`` (admin.E117)."""
    def test_invalid_type(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_select_related = 1
        self.assertIsInvalid(ValidationTestModelAdmin, ValidationTestModel,
                             "The value of 'list_select_related' must be a boolean, tuple or list.",
                             'admin.E117')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_select_related = False
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveAsCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.save_as`` (admin.E101)."""
    def test_not_boolean(self):
        # A truthy int is not enough; the value must be a real boolean.
        class ValidationTestModelAdmin(ModelAdmin):
            save_as = 1
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'save_as' must be a boolean.",
            'admin.E101')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            save_as = True
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveOnTopCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.save_on_top`` (admin.E102)."""
    def test_not_boolean(self):
        class ValidationTestModelAdmin(ModelAdmin):
            save_on_top = 1
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'save_on_top' must be a boolean.",
            'admin.E102')
    def test_valid_case(self):
        class ValidationTestModelAdmin(ModelAdmin):
            save_on_top = True
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class InlinesCheckTests(CheckTestCase):
    """System checks for ``ModelAdmin.inlines`` (admin.E103-E106)."""
    def test_not_iterable(self):
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = 10
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'inlines' must be a list or tuple.",
            'admin.E103')
    def test_not_model_admin(self):
        # Regexp assertion because the message embeds the module path of the
        # locally-defined inline class.
        class ValidationTestInline(object):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsInvalidRegexp(
            ValidationTestModelAdmin, ValidationTestModel,
            r"'.*\.ValidationTestInline' must inherit from 'BaseModelAdmin'\.",
            'admin.E104')
    def test_missing_model_field(self):
        class ValidationTestInline(TabularInline):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsInvalidRegexp(
            ValidationTestModelAdmin, ValidationTestModel,
            r"'.*\.ValidationTestInline' must have a 'model' attribute\.",
            'admin.E105')
    def test_invalid_model_type(self):
        """ Test if `model` attribute on inline model admin is a models.Model.
        """
        class SomethingBad(object):
            pass
        class ValidationTestInline(TabularInline):
            model = SomethingBad
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsInvalidRegexp(
            ValidationTestModelAdmin, ValidationTestModel,
            r"The value of '.*\.ValidationTestInline.model' must be a Model\.",
            'admin.E106')
    def test_valid_case(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FkNameCheckTests(CheckTestCase):
    """System checks for ``InlineModelAdmin.fk_name`` (admin.E202)."""
    def test_missing_field(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            fk_name = 'non_existent_field'
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "'modeladmin.ValidationTestInlineModel' has no field named 'non_existent_field'.",
            'admin.E202',
            invalid_obj=ValidationTestInline)
    def test_valid_case(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            fk_name = "parent"
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ExtraCheckTests(CheckTestCase):
    """System checks for ``InlineModelAdmin.extra`` (admin.E203)."""
    def test_not_integer(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            extra = "hello"
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'extra' must be an integer.",
            'admin.E203',
            invalid_obj=ValidationTestInline)
    def test_valid_case(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            extra = 2
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MaxNumCheckTests(CheckTestCase):
    """System checks for ``InlineModelAdmin.max_num`` (admin.E204)."""
    def test_not_integer(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            max_num = "hello"
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'max_num' must be an integer.",
            'admin.E204',
            invalid_obj=ValidationTestInline)
    def test_valid_case(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            max_num = 2
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MinNumCheckTests(CheckTestCase):
    """System checks for ``InlineModelAdmin.min_num`` (admin.E205)."""
    def test_not_integer(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            min_num = "hello"
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'min_num' must be an integer.",
            'admin.E205',
            invalid_obj=ValidationTestInline)
    def test_valid_case(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            min_num = 2
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FormsetCheckTests(CheckTestCase):
    """System checks for ``InlineModelAdmin.formset`` (admin.E206)."""
    def test_invalid_type(self):
        class FakeFormSet(object):
            pass
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            formset = FakeFormSet
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsInvalid(
            ValidationTestModelAdmin, ValidationTestModel,
            "The value of 'formset' must inherit from 'BaseModelFormSet'.",
            'admin.E206',
            invalid_obj=ValidationTestInline)
    def test_valid_case(self):
        class RealModelFormSet(BaseModelFormSet):
            pass
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            formset = RealModelFormSet
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class CustomModelAdminTests(CheckTestCase):
    """Back-compat: legacy validator_class hooks still run under checks."""
    @ignore_warnings(category=RemovedInDjango19Warning)
    def test_deprecation(self):
        "Deprecated Custom Validator definitions still work with the check framework."
        class CustomValidator(ModelAdminValidator):
            def validate_me(self, model_admin, model):
                # ImproperlyConfigured raised by a validator is surfaced as
                # an invalid-check result rather than propagating.
                raise ImproperlyConfigured('error!')
        class CustomModelAdmin(ModelAdmin):
            validator_class = CustomValidator
        self.assertIsInvalid(CustomModelAdmin, ValidationTestModel, 'error!')
class ListDisplayEditableTests(CheckTestCase):
    """Interaction between list_display, list_editable and list_display_links."""
    def test_list_display_links_is_none(self):
        """
        list_display and list_editable can contain the same values
        when list_display_links is None
        """
        class ProductAdmin(ModelAdmin):
            list_display = ['name', 'slug', 'pub_date']
            list_editable = list_display
            list_display_links = None
        self.assertIsValid(ProductAdmin, ValidationTestModel)
    def test_list_display_same_as_list_editable(self):
        """
        The first item in list_display can be the same as the first
        in list_editable
        """
        class ProductAdmin(ModelAdmin):
            list_display = ['name', 'slug', 'pub_date']
            list_editable = ['name', 'slug']
            list_display_links = ['pub_date']
        self.assertIsValid(ProductAdmin, ValidationTestModel)
class ModelAdminPermissionTests(TestCase):
    """
    ModelAdmin permission hooks (add/change/delete/module), exercised with
    minimal mock users that each hold exactly one Band permission.
    """
    class MockUser(object):
        def has_module_perms(self, app_label):
            # Module access is granted solely for the 'modeladmin' app.
            return app_label == "modeladmin"
    class MockAddUser(MockUser):
        def has_perm(self, perm):
            return perm == "modeladmin.add_band"
    class MockChangeUser(MockUser):
        def has_perm(self, perm):
            return perm == "modeladmin.change_band"
    class MockDeleteUser(MockUser):
        def has_perm(self, perm):
            return perm == "modeladmin.delete_band"
    def _assert_permissions(self, checker, expectations):
        # Run *checker* once per (user, expected-truthiness) pair.
        request = MockRequest()
        for user, allowed in expectations:
            request.user = user
            assertion = self.assertTrue if allowed else self.assertFalse
            assertion(checker(request))
    def test_has_add_permission(self):
        """
        Ensure that has_add_permission returns True for users who can add
        objects and False for users who can't.
        """
        ma = ModelAdmin(Band, AdminSite())
        self._assert_permissions(ma.has_add_permission, [
            (self.MockAddUser(), True),
            (self.MockChangeUser(), False),
            (self.MockDeleteUser(), False),
        ])
    def test_has_change_permission(self):
        """
        Ensure that has_change_permission returns True for users who can edit
        objects and False for users who can't.
        """
        ma = ModelAdmin(Band, AdminSite())
        self._assert_permissions(ma.has_change_permission, [
            (self.MockAddUser(), False),
            (self.MockChangeUser(), True),
            (self.MockDeleteUser(), False),
        ])
    def test_has_delete_permission(self):
        """
        Ensure that has_delete_permission returns True for users who can delete
        objects and False for users who can't.
        """
        ma = ModelAdmin(Band, AdminSite())
        self._assert_permissions(ma.has_delete_permission, [
            (self.MockAddUser(), False),
            (self.MockChangeUser(), False),
            (self.MockDeleteUser(), True),
        ])
    def test_has_module_permission(self):
        """
        Ensure that has_module_permission returns True for users who have any
        permission for the module and False for users who don't.
        """
        ma = ModelAdmin(Band, AdminSite())
        self._assert_permissions(ma.has_module_permission, [
            (self.MockAddUser(), True),
            (self.MockChangeUser(), True),
            (self.MockDeleteUser(), True),
        ])
        original_app_label = ma.opts.app_label
        ma.opts.app_label = 'anotherapp'
        try:
            self._assert_permissions(ma.has_module_permission, [
                (self.MockAddUser(), False),
                (self.MockChangeUser(), False),
                (self.MockDeleteUser(), False),
            ])
        finally:
            # Band's Options object is shared state; always restore it.
            ma.opts.app_label = original_app_label
| 34.490232 | 136 | 0.626657 |
cbe15f090c8bf205f8792697acc4078cc4e83690 | 16,752 | py | Python | scripts/papers/AAAI17/TaxiBJ/exptTaxiBJ_5_frame.py | mxxhcm/DeepST_For_CFP | a52f039453d4d817f2eba16ae8ce828b1b888b43 | [
"MIT"
] | null | null | null | scripts/papers/AAAI17/TaxiBJ/exptTaxiBJ_5_frame.py | mxxhcm/DeepST_For_CFP | a52f039453d4d817f2eba16ae8ce828b1b888b43 | [
"MIT"
] | null | null | null | scripts/papers/AAAI17/TaxiBJ/exptTaxiBJ_5_frame.py | mxxhcm/DeepST_For_CFP | a52f039453d4d817f2eba16ae8ce828b1b888b43 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Usage:
THEANO_FLAGS="device=gpu0" python exptTaxiBJ.py [number_of_residual_units]
"""
from __future__ import print_function
import os
import sys
import cPickle as pickle
import time
import numpy as np
import h5py
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint
from deepst.models.STResNet import stresnet
from deepst.config import Config
import deepst.metrics as metrics
from deepst.datasets import TaxiBJ
from deepst.datasets import TaxiBJ_3_frame
from deepst.datasets import TaxiBJ_5_frame
import copy
np.random.seed(1337) # for reproducibility
# parameters
DATAPATH = Config().DATAPATH  # data root; override via the DATAPATH environment variable
CACHEDATA = True  # cache data or NOT
path_cache = os.path.join(DATAPATH, 'CACHE')  # cache path
nb_epoch = 500  # number of epoch at training stage
nb_epoch_cont = 100  # number of epoch at training (cont) stage
batch_size = 32  # batch size
T = 48  # number of time intervals in one day
lr = 0.0002  # learning rate
len_closeness = 3  # length of closeness dependent sequence
len_period = 1  # length of period dependent sequence
len_trend = 1  # length of trend dependent sequence
# evaluation-time sequence lengths (used by test(); closeness is longer there)
len_closeness_test = 7  # length of closeness dependent sequence
len_period_test = 1  # length of period dependent sequence
len_trend_test = 1  # length of trend dependent sequence
if len(sys.argv) == 1:
    # No residual-unit count on the command line: print usage and abort.
    print(__doc__)
    sys.exit(-1)
    # nb_residual_unit = 2  # number of residual units
else:
    nb_residual_unit = int(sys.argv[1])  # number of residual units
nb_flow = 2  # there are two types of flows: inflow and outflow
# divide data into two subsets: Train & Test, of which the test set is the
# last 4 weeks
days_test = 7 * 4
len_test = T * days_test
map_height, map_width = 32, 32  # grid size
path_result = 'RET'
path_model = 'MODEL'
# Create output directories on first run.
if os.path.isdir(path_result) is False:
    os.mkdir(path_result)
if os.path.isdir(path_model) is False:
    os.mkdir(path_model)
if CACHEDATA and os.path.isdir(path_cache) is False:
    os.mkdir(path_cache)
def build_model(external_dim):
    """Assemble and compile an ST-ResNet from the module-level configuration.

    A temporal branch (closeness/period/trend) is only instantiated when its
    configured sequence length is positive; otherwise its conf is None.
    """
    def _branch_conf(seq_len):
        # (sequence length, flows, grid height, grid width) or disabled.
        return (seq_len, nb_flow, map_height, map_width) if seq_len > 0 else None
    model = stresnet(c_conf=_branch_conf(len_closeness),
                     p_conf=_branch_conf(len_period),
                     t_conf=_branch_conf(len_trend),
                     external_dim=external_dim,
                     nb_residual_unit=nb_residual_unit)
    model.compile(loss='mse', optimizer=Adam(lr=lr), metrics=[metrics.rmse])
    return model
def read_cache(fname):
    """Load a cached train/test split plus its scaler from disk.

    Reads the MinMax scaler from 'preprocessing.pkl' in the working directory
    and the arrays written by cache() from the HDF5 file *fname*.

    Returns:
        (X_train, Y_train, X_test, Y_test, mmn, external_dim,
         timestamp_train, timestamp_test) — the same tuple cache() stored.
    """
    # Fix: the original `pickle.load(open(...))` leaked the file handle;
    # close it deterministically.
    with open('preprocessing.pkl', 'rb') as pkl_file:
        mmn = pickle.load(pkl_file)
    f = h5py.File(fname, 'r')
    # try/finally so the HDF5 handle is released even if a key is missing.
    try:
        num = int(f['num'].value)
        X_train, Y_train, X_test, Y_test = [], [], [], []
        for i in xrange(num):
            X_train.append(f['X_train_%i' % i].value)
            X_test.append(f['X_test_%i' % i].value)
        Y_train = f['Y_train'].value
        Y_test = f['Y_test'].value
        external_dim = f['external_dim'].value
        timestamp_train = f['T_train'].value
        timestamp_test = f['T_test'].value
    finally:
        f.close()
    return X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test
def cache(fname, X_train, Y_train, X_test, Y_test, external_dim, timestamp_train, timestamp_test):
    """Persist a prepared train/test split into the HDF5 file *fname*.

    Mirrors read_cache(): each X component is stored as its own dataset,
    indexed by position.
    """
    store = h5py.File(fname, 'w')
    store.create_dataset('num', data=len(X_train))
    for idx, component in enumerate(X_train):
        store.create_dataset('X_train_%i' % idx, data=component)
    for idx, component in enumerate(X_test):
        store.create_dataset('X_test_%i' % idx, data=component)
    store.create_dataset('Y_train', data=Y_train)
    store.create_dataset('Y_test', data=Y_test)
    # A missing external component is encoded as -1 so it round-trips via HDF5.
    store.create_dataset('external_dim',
                         data=-1 if external_dim is None else int(external_dim))
    store.create_dataset('T_train', data=timestamp_train)
    store.create_dataset('T_test', data=timestamp_test)
    store.close()
def main():
    """End-to-end training pipeline: load (or build) the cached TaxiBJ split,
    train ST-ResNet with early stopping, then continue training on the full
    training set, evaluating after each phase.

    Side effects: writes model weights under MODEL/, training histories under
    RET/, and (optionally) a dataset cache under DATAPATH/CACHE.
    """
    # load data
    print("loading data...")
    ts = time.time()
    fname = os.path.join(DATAPATH, 'CACHE', 'TaxiBJ_C{}_P{}_T{}_no_external.h5'.format(
        len_closeness, len_period, len_trend))
    if os.path.exists(fname) and CACHEDATA:
        X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = read_cache(
            fname)
        print("load %s successfully" % fname)
    else:
        # No cache yet: build the split from raw data (no external features).
        X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = TaxiBJ_3_frame.load_data(
            T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend, len_test=len_test,
            preprocess_name='preprocessing.pkl', meta_data=False, meteorol_data=False, holiday_data=False)
        if CACHEDATA:
            cache(fname, X_train, Y_train, X_test, Y_test,
                  external_dim, timestamp_train, timestamp_test)
    print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])
    print("\nelapsed time (loading data): %.3f seconds\n" % (time.time() - ts))
    print('=' * 10)
    print("compiling model...")
    print(
        "**at the first time, it takes a few minites to compile if you use [Theano] as the backend**")
    ts = time.time()
    model = build_model(external_dim)
    hyperparams_name = 'c{}.p{}.t{}.resunit{}.lr{}'.format(
        len_closeness, len_period, len_trend, nb_residual_unit, lr)
    fname_param = os.path.join('MODEL', '{}.best.h5'.format(hyperparams_name))
    # Phase 1: train with a 10% validation split; keep only the best weights.
    early_stopping = EarlyStopping(monitor='val_rmse', patience=2, mode='min')
    model_checkpoint = ModelCheckpoint(
        fname_param, monitor='val_rmse', verbose=0, save_best_only=True, mode='min')
    print("\nelapsed time (compiling model): %.3f seconds\n" %
          (time.time() - ts))
    print('=' * 10)
    print("training model...")
    ts = time.time()
    history = model.fit(X_train, Y_train,
                        nb_epoch=nb_epoch,
                        batch_size=batch_size,
                        validation_split=0.1,
                        callbacks=[early_stopping, model_checkpoint],
                        verbose=1)
    model.save_weights(os.path.join(
        'MODEL', '{}.h5'.format(hyperparams_name)), overwrite=True)
    pickle.dump((history.history), open(os.path.join(
        path_result, '{}.history.pkl'.format(hyperparams_name)), 'wb'))
    print("\nelapsed time (training): %.3f seconds\n" % (time.time() - ts))
    print('=' * 10)
    print('evaluating using the model that has the best loss on the valid set')
    ts = time.time()
    model.load_weights(fname_param)
    # RMSE is reported both normalized and rescaled to real flow counts via
    # the MinMax scaler range.
    score = model.evaluate(X_train, Y_train, batch_size=Y_train.shape[
        0] // 48, verbose=0)
    print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    score = model.evaluate(
        X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    print("\nelapsed time (eval): %.3f seconds\n" % (time.time() - ts))
    print('=' * 10)
    # Phase 2: continue training on ALL training data (no validation split),
    # checkpointing on training rmse.
    print("training model (cont)...")
    ts = time.time()
    fname_param = os.path.join(
        'MODEL', '{}.cont.best.h5'.format(hyperparams_name))
    model_checkpoint = ModelCheckpoint(
        fname_param, monitor='rmse', verbose=0, save_best_only=True, mode='min')
    history = model.fit(X_train, Y_train, nb_epoch=nb_epoch_cont, verbose=1, batch_size=batch_size, callbacks=[
                        model_checkpoint])
    pickle.dump((history.history), open(os.path.join(
        path_result, '{}.cont.history.pkl'.format(hyperparams_name)), 'wb'))
    model.save_weights(os.path.join(
        'MODEL', '{}_cont.h5'.format(hyperparams_name)), overwrite=True)
    print("\nelapsed time (training cont): %.3f seconds\n" % (time.time() - ts))
    print('=' * 10)
    print('evaluating using the final model')
    score = model.evaluate(X_train, Y_train, batch_size=Y_train.shape[
        0] // 48, verbose=0)
    print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    ts = time.time()
    score = model.evaluate(
        X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    print("\nelapsed time (eval cont): %.3f seconds\n" % (time.time() - ts))
def test():
    """Iteratively evaluate the trained model over a 5-step horizon.

    Loads (or builds and caches) the TaxiBJ test split, restores the best
    checkpoint, then predicts 5 consecutive time steps, feeding each
    prediction back as input for the next step. After every step it prints
    the cumulative RMSE (normalized and de-normalized via the min-max
    scaler ``mmn``) and saves the per-cell error tensor to an .npz file.

    Relies on module-level configuration: DATAPATH, CACHEDATA, T, nb_flow,
    len_closeness*, len_period*, len_trend*, len_test, nb_residual_unit, lr.
    """
    print("loading data...")
    ts = time.time()
    # Cache file name encodes the closeness/period/trend window lengths.
    fname = os.path.join(DATAPATH, 'CACHE', 'TaxiBJ_C{}_P{}_T{}_no_external.h5'.format(
        len_closeness_test, len_period_test, len_trend_test))
    if os.path.exists(fname) and CACHEDATA:
        X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = read_cache(
            fname)
        print("load %s successfully" % fname)
    else:
        # Cache miss: preprocess from raw data, then populate the cache.
        X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = TaxiBJ_5_frame.load_data(
            T=T, nb_flow=nb_flow, len_closeness=len_closeness_test, len_period=len_period_test, len_trend=len_trend_test, len_test=len_test,
            preprocess_name='preprocessing.pkl', meta_data=False, meteorol_data=False, holiday_data=False)
        if CACHEDATA:
            cache(fname, X_train, Y_train, X_test, Y_test,
                  external_dim, timestamp_train, timestamp_test)
    print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])
    print("\nelapsed time (loading data): %.3f seconds\n" % (time.time() - ts))
    # load best model
    ts = time.time()
    model = build_model(external_dim)
    hyperparams_name = 'c{}.p{}.t{}.resunit{}.lr{}'.format(
        len_closeness, len_period, len_trend, nb_residual_unit, lr)
    # Use the checkpoint produced by the continued-training phase.
    fname_param = os.path.join('MODEL', '{}.cont.best.h5'.format(hyperparams_name))
    model.load_weights(fname_param)
    # --- Step 1 -------------------------------------------------------------
    # NOTE(review): channel slicing assumes X_test[0] packs (nb_flow=2)-channel
    # frames consecutively along axis 1, newest frames at the lowest indices;
    # channels 6:8 act as the step-1 target, 8:14 as its closeness input —
    # confirm against TaxiBJ_5_frame.load_data.
    X_test_1 = copy.deepcopy(X_test)
    Y_test_1 = copy.deepcopy(X_test[0][:, 6:8, :, :])
    X_test_1[0] = X_test_1[0][:, 8:14, :, :]
    X_test_1[1] = X_test_1[1][:, 8:10, :, :]
    X_test_1[2] = X_test_1[2][:, 8:10, :, :]
    score = model.evaluate(
        X_test_1, Y_test_1, batch_size=Y_test_1.shape[0], verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    print("\nelapsed time (eval): %.3f seconds\n" % (time.time() - ts))
    predict_1 = model.predict(X_test_1)
    # De-normalize: (mmn._max - mmn._min) / 2. inverts the [-1, 1] scaling.
    rmse1 = np.sqrt(np.mean(np.square(predict_1 - Y_test_1)))*(mmn._max - mmn._min) / 2.
    print("rmse1", rmse1)
    # Per-cell error map, saved channels-last for downstream analysis.
    rmse_beijing_one = np.sqrt(np.square(predict_1 - Y_test_1)) * (mmn._max - mmn._min) / 2.
    rmse_beijing_one = np.transpose(rmse_beijing_one, [0, 2, 3, 1])
    np.savez("deepst_beijing_one", rmse_beijing_one)
    # --- Step 2: feed step-1 predictions back as the newest closeness frame --
    X_test_2 = copy.deepcopy(X_test)
    Y_test_2 = copy.deepcopy(X_test[0][:, 4:6, :, :])
    X_test_2[0] = np.hstack((predict_1, X_test_2[0][:, 8:12, :, :]))
    X_test_2[1] = X_test_2[1][:, 6:8, :, :]
    X_test_2[2] = X_test_2[2][:, 6:8, :, :]
    predict_2 = model.predict(X_test_2)
    # X_test_2[0] = np.vstack((X_test_1[0], X_test_2[0]))
    # X_test_2[1] = np.vstack((X_test_1[1], X_test_2[1]))
    # X_test_2[2] = np.vstack((X_test_1[2], X_test_2[2]))
    # Y_test_2 = np.vstack((Y_test_1, Y_test_2))
    score = model.evaluate(
        X_test_2, Y_test_2, batch_size=Y_test_2.shape[0], verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    print("\nelapsed time (eval): %.3f seconds\n" % (time.time() - ts))
    # Cumulative RMSE over steps 1-2.
    rmse2 = np.sqrt(np.mean(np.square(predict_1-Y_test_1)+np.square(predict_2-Y_test_2))/2)*(mmn._max - mmn._min) / 2.
    print("rmse2", rmse2)
    rmse_beijing_two = np.sqrt((np.square(predict_1-Y_test_1)+np.square(predict_2-Y_test_2))/2)*(mmn._max - mmn._min) / 2.
    rmse_beijing_two = np.transpose(rmse_beijing_two, [0, 2, 3, 1])
    np.savez("deepst_beijing_two", rmse_beijing_two)
    # --- Step 3: two predicted frames + one observed frame as input ----------
    X_test_3 = copy.deepcopy(X_test)
    Y_test_3 = copy.deepcopy(X_test[0][:, 2:4, :, :])
    X_test_3[0] = np.hstack((predict_2, predict_1, X_test_3[0][:, 8:10, :, :]))
    X_test_3[1] = X_test_3[1][:, 4:6, :, :]
    X_test_3[2] = X_test_3[2][:, 4:6, :, :]
    predict_3 = model.predict(X_test_3)
    # print(predict_1+predict_2)
    #
    # X_test_3[0] = np.vstack((X_test_2[0], X_test_3[0]))
    # X_test_3[1] = np.vstack((X_test_2[1], X_test_3[1]))
    # X_test_3[2] = np.vstack((X_test_2[2], X_test_3[2]))
    # Y_test_3 = np.vstack((Y_test_2, Y_test_3))
    score = model.evaluate(
        X_test_3, Y_test_3, batch_size=Y_test_3.shape[0], verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    print("\nelapsed time (eval): %.3f seconds\n" % (time.time() - ts))
    # Cumulative RMSE over steps 1-3.
    rmse3 = np.sqrt(np.mean(np.square(predict_1 - Y_test_1) + np.square(predict_2 - Y_test_2) + np.square(predict_3-Y_test_3))/3)*(mmn._max - mmn._min) / 2.
    print("rmse3", rmse3)
    rmse_beijing_three = np.sqrt((np.square(predict_1 - Y_test_1) + np.square(predict_2 - Y_test_2) + np.square(predict_3-Y_test_3))/3)*(mmn._max - mmn._min) / 2.
    rmse_beijing_three = np.transpose(rmse_beijing_three, [0, 2, 3, 1])
    np.savez("deepst_beijing_three", rmse_beijing_three)
    # --- Step 4: closeness input is entirely predicted frames ----------------
    X_test_4 = copy.deepcopy(X_test)
    Y_test_4 = copy.deepcopy(X_test[0][:, 0:2, :, :])
    X_test_4[0] = np.hstack((predict_3, predict_2, predict_1))
    X_test_4[1] = X_test_4[1][:, 2:4, :, :]
    X_test_4[2] = X_test_4[2][:, 2:4, :, :]
    predict_4 = model.predict(X_test_4)
    # print(predict_1+predict_2)
    #
    # X_test_3[0] = np.vstack((X_test_2[0], X_test_3[0]))
    # X_test_3[1] = np.vstack((X_test_2[1], X_test_3[1]))
    # X_test_3[2] = np.vstack((X_test_2[2], X_test_3[2]))
    # Y_test_3 = np.vstack((Y_test_2, Y_test_3))
    score = model.evaluate(
        X_test_4, Y_test_4, batch_size=Y_test_4.shape[0], verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    print("\nelapsed time (eval): %.3f seconds\n" % (time.time() - ts))
    # Cumulative RMSE over steps 1-4.
    rmse4 = np.sqrt(np.mean(np.square(predict_1 - Y_test_1) + np.square(predict_2 - Y_test_2)
                            + np.square(predict_3-Y_test_3)+np.square(predict_4 - Y_test_4))/4)*(mmn._max - mmn._min) / 2.
    print("rmse4", rmse4)
    rmse_beijing_four = np.sqrt((np.square(predict_1 - Y_test_1) + np.square(predict_2 - Y_test_2)
                                 + np.square(predict_3-Y_test_3)+np.square(predict_4 - Y_test_4))/4)*(mmn._max - mmn._min) / 2.
    rmse_beijing_four = np.transpose(rmse_beijing_four, [0, 2, 3, 1])
    np.savez("deepst_beijing_four", rmse_beijing_four)
    # --- Step 5: final step targets the held-out Y_test ----------------------
    X_test_5 = copy.deepcopy(X_test)
    Y_test_5 = copy.deepcopy(Y_test)
    X_test_5[0] = np.hstack((predict_4, predict_3, predict_2))
    X_test_5[1] = X_test_5[1][:, 0:2, :, :]
    X_test_5[2] = X_test_5[2][:, 0:2, :, :]
    predict_5 = model.predict(X_test_5)
    # print(predict_1+predict_2)
    #
    # X_test_3[0] = np.vstack((X_test_2[0], X_test_3[0]))
    # X_test_3[1] = np.vstack((X_test_2[1], X_test_3[1]))
    # X_test_3[2] = np.vstack((X_test_2[2], X_test_3[2]))
    # Y_test_3 = np.vstack((Y_test_2, Y_test_3))
    score = model.evaluate(
        X_test_5, Y_test_5, batch_size=Y_test_5.shape[0], verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    print("\nelapsed time (eval): %.3f seconds\n" % (time.time() - ts))
    # Cumulative RMSE over the full 5-step horizon.
    rmse5 = np.sqrt(np.mean(
        np.square(predict_1 - Y_test_1) + np.square(predict_2 - Y_test_2) + np.square(predict_3 - Y_test_3)
        +np.square(predict_4 - Y_test_4) +np.square(predict_5 - Y_test_5)) / 5) * (
        mmn._max - mmn._min) / 2.
    print("rmse5", rmse5)
    rmse_beijing_five = np.sqrt((
        np.square(predict_1 - Y_test_1) + np.square(predict_2 - Y_test_2) + np.square(predict_3 - Y_test_3)
        +np.square(predict_4 - Y_test_4) +np.square(predict_5 - Y_test_5)) / 5) * (
        mmn._max - mmn._min) / 2.
    rmse_beijing_five = np.transpose(rmse_beijing_five, [0, 2, 3, 1])
    np.savez("deepst_beijing_five", rmse_beijing_five)
if __name__ == '__main__':
    # main()
    # Training entry point above is deliberately disabled; run only the
    # multi-step evaluation against the saved checkpoint.
    test()
| 42.734694 | 162 | 0.641894 |
3d66b3896830b0706f85b493eacb9d35ec4beb6a | 370 | py | Python | pochidevhouse/users/apps.py | tarkin88/PochiDevHouse | 656d09123f8a7bb1c400b94121528316c6697339 | [
"MIT"
] | null | null | null | pochidevhouse/users/apps.py | tarkin88/PochiDevHouse | 656d09123f8a7bb1c400b94121528316c6697339 | [
"MIT"
] | null | null | null | pochidevhouse/users/apps.py | tarkin88/PochiDevHouse | 656d09123f8a7bb1c400b94121528316c6697339 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class UsersConfig(AppConfig):
    """Django application configuration for the users app."""

    name = "pochidevhouse.users"
    verbose_name = "Users"

    def ready(self):
        """Hook called when Django finishes loading this app.

        Used for user-related system checks and signal registration.
        """
        try:
            import users.signals  # noqa F401
        except ImportError:
            # Signals module is optional; absence is not an error.
            pass
| 21.764706 | 45 | 0.578378 |
37dc1207184eed4d091a81035f515ad6a3bd1012 | 11,417 | py | Python | sdks/python/setup.py | borovikovd/beam | 986cffa160339a2893879f6ece8ea891af373a9f | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sdks/python/setup.py | borovikovd/beam | 986cffa160339a2893879f6ece8ea891af373a9f | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sdks/python/setup.py | borovikovd/beam | 986cffa160339a2893879f6ece8ea891af373a9f | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK for Python setup file."""
import os
import sys
import warnings
from distutils.errors import DistutilsError
from distutils.version import StrictVersion
# Pylint and isort disagree here.
# pylint: disable=ungrouped-imports
import setuptools
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
from pkg_resources import normalize_path
from pkg_resources import to_filename
from setuptools import Command
from setuptools.command.build_py import build_py
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from setuptools.command.test import test
class mypy(Command):
  """Custom ``setup.py mypy`` command: type-check the built package tree."""

  # No command-line options are accepted.
  user_options = []

  def initialize_options(self):
    """Abstract method that is required to be overwritten"""

  def finalize_options(self):
    """Abstract method that is required to be overwritten"""

  def get_project_path(self):
    """Build egg metadata/extensions and return the path mypy should scan."""
    self.run_command('egg_info')

    # Build extensions in-place
    self.reinitialize_command('build_ext', inplace=1)
    self.run_command('build_ext')

    ei_cmd = self.get_finalized_command("egg_info")
    project_path = normalize_path(ei_cmd.egg_base)
    return os.path.join(project_path, to_filename(ei_cmd.egg_name))

  def run(self):
    """Run mypy as a subprocess; fail the command on a nonzero exit."""
    import subprocess
    args = ['mypy', self.get_project_path()]
    result = subprocess.call(args)
    if result != 0:
      raise DistutilsError("mypy exited with status %d" % result)
def get_version():
  """Return the SDK version string declared in apache_beam/version.py.

  The version module is executed in an isolated namespace rather than
  imported, so setup.py works before the package is importable.

  Returns:
    The ``__version__`` value defined in apache_beam/version.py.
  """
  global_names = {}
  version_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), 'apache_beam', 'version.py')
  # Use a context manager so the file handle is closed deterministically;
  # the previous open(...).read() leaked the handle until garbage collection.
  with open(version_path) as version_file:
    exec(version_file.read(), global_names)  # pylint: disable=exec-used
  return global_names['__version__']
PACKAGE_NAME = 'apache-beam'
PACKAGE_VERSION = get_version()
PACKAGE_DESCRIPTION = 'Apache Beam SDK for Python'
PACKAGE_URL = 'https://beam.apache.org'
PACKAGE_DOWNLOAD_URL = 'https://pypi.python.org/pypi/apache-beam'
PACKAGE_AUTHOR = 'Apache Software Foundation'
PACKAGE_EMAIL = 'dev@beam.apache.org'
PACKAGE_KEYWORDS = 'apache beam'
PACKAGE_LONG_DESCRIPTION = '''
Apache Beam is a unified programming model for both batch and streaming
data processing, enabling efficient execution across diverse distributed
execution engines and providing extensibility points for connecting to
different technologies and user communities.
'''
# Warn (but do not fail) when the build environment's pip is older than the
# version the SDK is routinely tested with.
REQUIRED_PIP_VERSION = '7.0.0'
_PIP_VERSION = get_distribution('pip').version
if StrictVersion(_PIP_VERSION) < StrictVersion(REQUIRED_PIP_VERSION):
  warnings.warn(
      "You are using version {0} of pip. " \
      "However, version {1} is recommended.".format(
          _PIP_VERSION, REQUIRED_PIP_VERSION
      )
  )

# Cython is optional: warn on an outdated install, skip silently when absent.
REQUIRED_CYTHON_VERSION = '0.28.1'
try:
  _CYTHON_VERSION = get_distribution('cython').version
  if StrictVersion(_CYTHON_VERSION) < StrictVersion(REQUIRED_CYTHON_VERSION):
    warnings.warn(
        "You are using version {0} of cython. " \
        "However, version {1} is recommended.".format(
            _CYTHON_VERSION, REQUIRED_CYTHON_VERSION
        )
    )
except DistributionNotFound:
  # do nothing if Cython is not installed
  pass

try:
  # pylint: disable=wrong-import-position
  from Cython.Build import cythonize
except ImportError:
  # Pure-Python fallback: pretend there is nothing to cythonize.
  cythonize = lambda *args, **kwargs: []
REQUIRED_PACKAGES = [
# Avro 1.9.2 for python3 was broken. The issue was fixed in version 1.9.2.1
'crcmod>=1.7,<2.0',
# dataclasses backport for python_version<3.7. No version bound because this
# is Python standard since Python 3.7 and each Python version is compatible
# with a specific dataclasses version.
'dataclasses;python_version<"3.7"',
'orjson<4.0',
# Dill doesn't have forwards-compatibility guarantees within minor version.
# Pickles created with a new version of dill may not unpickle using older
# version of dill. It is best to use the same version of dill on client and
# server, therefore list of allowed versions is very narrow.
# See: https://github.com/uqfoundation/dill/issues/341.
'dill>=0.3.1.1,<0.3.2',
'fastavro>=0.21.4,<2',
'grpcio>=1.29.0,<2',
'hdfs>=2.1.0,<3.0.0',
'httplib2>=0.8,<0.20.0',
'numpy>=1.14.3,<1.21.0',
'pymongo>=3.8.0,<4.0.0',
'oauth2client>=2.0.1,<5',
'protobuf>=3.12.2,<4',
'pyarrow>=0.15.1,<6.0.0',
'pydot>=1.2.0,<2',
'python-dateutil>=2.8.0,<3',
'pytz>=2018.3',
'requests>=2.24.0,<3.0.0',
'typing-extensions>=3.7.0,<4',
]
# [BEAM-8181] pyarrow cannot be installed on 32-bit Windows platforms.
if sys.platform == 'win32' and sys.maxsize <= 2**32:
REQUIRED_PACKAGES = [
p for p in REQUIRED_PACKAGES if not p.startswith('pyarrow')
]
REQUIRED_TEST_PACKAGES = [
'freezegun>=0.3.12',
'mock>=1.0.1,<3.0.0',
'pandas<2.0.0',
'parameterized>=0.7.1,<0.8.0',
'pyhamcrest>=1.9,!=1.10.0,<2.0.0',
'pyyaml>=3.12,<6.0.0',
'requests_mock>=1.7,<2.0',
'tenacity>=5.0.2,<6.0',
'pytest>=4.4.0,<5.0',
'pytest-xdist>=1.29.0,<2',
'pytest-timeout>=1.3.3,<2',
'sqlalchemy>=1.3,<2.0',
'psycopg2-binary>=2.8.5,<3.0.0',
'testcontainers>=3.0.3,<4.0.0',
]
GCP_REQUIREMENTS = [
'cachetools>=3.1.0,<5',
'google-apitools>=0.5.31,<0.5.32',
# NOTE: Maintainers, please do not require google-auth>=2.x.x
# Until this issue is closed
# https://github.com/googleapis/google-cloud-python/issues/10566
'google-auth>=1.18.0,<3',
'google-cloud-datastore>=1.8.0,<2',
'google-cloud-pubsub>=0.39.0,<2',
# GCP packages required by tests
'google-cloud-bigquery>=1.6.0,<3',
'google-cloud-bigquery-storage>=2.6.3',
'google-cloud-core>=0.28.1,<2',
'google-cloud-bigtable>=0.31.1,<2',
'google-cloud-spanner>=1.13.0,<2',
'grpcio-gcp>=0.2.2,<1',
# GCP Packages required by ML functionality
'google-cloud-dlp>=0.12.0,<2',
'google-cloud-language>=1.3.0,<2',
'google-cloud-videointelligence>=1.8.0,<2',
'google-cloud-vision>=0.38.0,<2',
'google-cloud-recommendations-ai>=0.1.0,<=0.2.0'
]
INTERACTIVE_BEAM = [
'facets-overview>=1.0.0,<2',
'ipython>=7,<8',
'ipykernel>=5.2.0,<6',
'ipywidgets>=7.6.5,<8',
# Skip version 6.1.13 due to
# https://github.com/jupyter/jupyter_client/issues/637
'jupyter-client>=6.1.11,<6.1.13',
'timeloop>=1.0.2,<2',
]
INTERACTIVE_BEAM_TEST = [
# notebok utils
'nbformat>=5.0.5,<6',
'nbconvert>=6.2.0,<7',
# headless chrome based integration tests
'selenium>=3.141.0,<4',
'needle>=0.5.0,<1',
'chromedriver-binary>=93,<94',
# use a fixed major version of PIL for different python versions
'pillow>=7.1.1,<8',
]
AWS_REQUIREMENTS = ['boto3 >=1.9']
AZURE_REQUIREMENTS = [
'azure-storage-blob >=12.3.2',
'azure-core >=1.7.0',
]
# We must generate protos after setup_requires are installed.
def generate_protos_first(original_cmd):
  """Wrap a setuptools command class so protos are generated before it runs.

  Returns a subclass of ``original_cmd`` whose ``run`` first regenerates the
  proto files; if the ``gen_protos`` helper is unavailable, the original
  command class is returned unchanged (with a warning).
  """
  try:
    # See https://issues.apache.org/jira/browse/BEAM-2366
    # pylint: disable=wrong-import-position
    import gen_protos
  except ImportError:
    warnings.warn("Could not import gen_protos, skipping proto generation.")
    return original_cmd

  class _GenProtosFirst(original_cmd, object):
    def run(self):
      gen_protos.generate_proto_files()
      super().run()

  return _GenProtosFirst
python_requires = '>=3.6'
if sys.version_info.major == 3 and sys.version_info.minor >= 9:
warnings.warn(
'This version of Apache Beam has not been sufficiently tested on '
'Python %s.%s. You may encounter bugs or missing features.' %
(sys.version_info.major, sys.version_info.minor))
if __name__ == '__main__':
setuptools.setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description=PACKAGE_DESCRIPTION,
long_description=PACKAGE_LONG_DESCRIPTION,
url=PACKAGE_URL,
download_url=PACKAGE_DOWNLOAD_URL,
author=PACKAGE_AUTHOR,
author_email=PACKAGE_EMAIL,
packages=setuptools.find_packages(),
package_data={
'apache_beam': [
'*/*.pyx',
'*/*/*.pyx',
'*/*.pxd',
'*/*/*.pxd',
'*/*.h',
'*/*/*.h',
'testing/data/*.yaml',
'portability/api/*.pyi',
'portability/api/*.yaml',
]
},
ext_modules=cythonize([
# Make sure to use language_level=3 cython directive in files below.
'apache_beam/**/*.pyx',
'apache_beam/coders/coder_impl.py',
'apache_beam/metrics/cells.py',
'apache_beam/metrics/execution.py',
'apache_beam/runners/common.py',
'apache_beam/runners/worker/logger.py',
'apache_beam/runners/worker/opcounters.py',
'apache_beam/runners/worker/operations.py',
'apache_beam/transforms/cy_combiners.py',
'apache_beam/transforms/stats.py',
'apache_beam/utils/counters.py',
'apache_beam/utils/windowed_value.py',
]),
install_requires=REQUIRED_PACKAGES,
python_requires=python_requires,
# BEAM-8840: Do NOT use tests_require or setup_requires.
extras_require={
'docs': [
'Sphinx>=1.5.2,<2.0',
# Pinning docutils as a workaround for Sphinx issue:
# https://github.com/sphinx-doc/sphinx/issues/9727
'docutils==0.17.1'
],
'test': REQUIRED_TEST_PACKAGES,
'gcp': GCP_REQUIREMENTS,
'interactive': INTERACTIVE_BEAM,
'interactive_test': INTERACTIVE_BEAM_TEST,
'aws': AWS_REQUIREMENTS,
'azure': AZURE_REQUIREMENTS,
'dataframe': ['pandas>=1.0,<1.4']
},
zip_safe=False,
# PyPI package information.
classifiers=[
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# When updating version classifiers, also update version warnings
# above and in apache_beam/__init__.py.
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache License, Version 2.0',
keywords=PACKAGE_KEYWORDS,
cmdclass={
'build_py': generate_protos_first(build_py),
'develop': generate_protos_first(develop),
'egg_info': generate_protos_first(egg_info),
'test': generate_protos_first(test),
'mypy': generate_protos_first(mypy),
},
)
| 33.383041 | 80 | 0.658492 |
2f2027adcb2357a5769d04656e5c6ff3fef71fc0 | 356 | py | Python | tests/test_use_categories.py | betapl3b/ta-hackaton-2021-emerald | 45299cb5d40405bbb3aedd24ceef4acc625c0a40 | [
"MIT"
] | 1 | 2021-09-27T13:52:53.000Z | 2021-09-27T13:52:53.000Z | tests/test_use_categories.py | betapl3b/ta-hackaton-2021-emerald | 45299cb5d40405bbb3aedd24ceef4acc625c0a40 | [
"MIT"
] | null | null | null | tests/test_use_categories.py | betapl3b/ta-hackaton-2021-emerald | 45299cb5d40405bbb3aedd24ceef4acc625c0a40 | [
"MIT"
] | null | null | null | from pytest_bdd import scenario
from tests.steps.common import *
from tests.steps.categories import *
@scenario(
    "features/categories.feature",
    "Select categories",
)
def test_select_categories(browser):
    # pytest-bdd scenario driver: steps are bound from tests.steps.*;
    # the body is intentionally empty.
    pass
@scenario(
"features/categories.feature",
"Categories after refresh",
)
def test_categories_after_refresh(browser):
pass | 19.777778 | 43 | 0.75 |
ec5ca98ab1f8bcbb2cd5ee5c756004c4b087d210 | 349 | py | Python | CursoIntensivoPython/Aula15_visualizacao_de_dados/exercicios/cubos.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | CursoIntensivoPython/Aula15_visualizacao_de_dados/exercicios/cubos.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | CursoIntensivoPython/Aula15_visualizacao_de_dados/exercicios/cubos.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
# Plot y = x**3 over x = 1..5000.
x_values = list(range(1, 5001))
y_values = [x**3 for x in x_values]

# Set the chart title and label the axes
plt.title("Cubos", fontsize=24)
plt.xlabel('Valores', fontsize=14)
plt.ylabel('Valores ao cubo', fontsize=14)

# Line chart
plt.plot(x_values, y_values)
# NOTE(review): the y-limit 5.0e10 is below 5000**3 (1.25e11), so the top of
# the curve is clipped — confirm this is intentional.
plt.axis([0, 5001, 0, 50000000001])
plt.show() | 24.928571 | 42 | 0.730659 |
7f758911599478022bda83624845710e0e755bb9 | 1,216 | py | Python | playListDownloader.py | T4puSD/YoutubeDownloader | 7ddfa76083bbd8309db0b179372629e47d536675 | [
"MIT"
] | null | null | null | playListDownloader.py | T4puSD/YoutubeDownloader | 7ddfa76083bbd8309db0b179372629e47d536675 | [
"MIT"
] | null | null | null | playListDownloader.py | T4puSD/YoutubeDownloader | 7ddfa76083bbd8309db0b179372629e47d536675 | [
"MIT"
] | null | null | null | import concurrent.futures
from getuList import SongListGenerator
from debugger import logging
from advancedyvdown import start_audio_download, start_video_download

# Prompt until a YouTube playlist URL is provided.
#url = 'https://www.youtube.com/playlist?list=PLRiSVT9MWtYwyhhgVNnRDTpCRvF-t2lc8'
url = input("Enter a valid YouTube Playlist Link:")
while not url.startswith('https://www.youtube.com/playlist?list'):
    url = input("Enter a valid YouTube Playlist Link:")

# Prompt for the download mode: 'a' = audio, 'v' = video.
av = ['a','v']
aud_or_vid = None
while not aud_or_vid in av:
    aud_or_vid = input("Download in audio or video (a/v)?")

# Split the playlist into batches of three so at most three downloads
# run concurrently per thread-pool round.
song_list = SongListGenerator().generateList(url)
song_batchs_of_three = [song_list[i:i+3] for i in range(0,len(song_list),3)]
# print(song_batchs_of_three)
# print(song_list)
if len(song_list) > 0:
    try:
        if aud_or_vid == av[0]:
            for batch_one in song_batchs_of_three:
                with concurrent.futures.ThreadPoolExecutor() as executor:
                    executor.map(start_audio_download,batch_one)
        elif aud_or_vid == av[1]:
            for batch_one in song_batchs_of_three:
                with concurrent.futures.ThreadPoolExecutor() as executor:
                    executor.map(start_video_download,batch_one)
    except Exception as e:
        # Best-effort: failures are logged, not re-raised.
        logging.debug("Exception occured running threadpool executor")
logging.debug(e) | 36.848485 | 81 | 0.769737 |
eb941aef1a2e21b53af9f7982a62f1392278e7bd | 311 | pyw | Python | launcher.pyw | exoticknight/ConvertFlow | 746f431187478de170fc63433e3b10fe1dfab9d0 | [
"MIT"
] | 32 | 2016-11-25T14:45:09.000Z | 2021-10-06T12:24:12.000Z | launcher.pyw | exoticknight/ConvertFlow | 746f431187478de170fc63433e3b10fe1dfab9d0 | [
"MIT"
] | null | null | null | launcher.pyw | exoticknight/ConvertFlow | 746f431187478de170fc63433e3b10fe1dfab9d0 | [
"MIT"
] | 8 | 2016-12-09T05:33:49.000Z | 2020-08-28T11:25:31.000Z | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
import json
import os
import subprocess
with open("launcher.config") as f:
data = json.load(f)
try:
subprocess.Popen(os.path.abspath(data["pathToElectron"]) + " " + os.path.abspath(data["entry"]), shell=False)
except Exception, e:
pass | 23.923077 | 117 | 0.649518 |
1dbb20d924e641bff14e6c4b41d4c2468053fa8d | 16,506 | py | Python | env/lib/python3.8/site-packages/Crypto/SelfTest/Protocol/test_KDF.py | juansjimenez/goodocity-backend | 77b2ab3f11047e2896e81358b8d8c63d7952b521 | [
"MIT"
] | 1 | 2020-09-26T02:27:05.000Z | 2020-09-26T02:27:05.000Z | env/lib/python3.8/site-packages/Crypto/SelfTest/Protocol/test_KDF.py | juansjimenez/goodocity-backend | 77b2ab3f11047e2896e81358b8d8c63d7952b521 | [
"MIT"
] | 12 | 2021-10-18T00:37:47.000Z | 2022-03-09T01:35:20.000Z | env/lib/python3.8/site-packages/Crypto/SelfTest/Protocol/test_KDF.py | juansjimenez/goodocity-backend | 77b2ab3f11047e2896e81358b8d8c63d7952b521 | [
"MIT"
] | 1 | 2018-07-06T03:48:08.000Z | 2018-07-06T03:48:08.000Z | # -*- coding: utf-8 -*-
#
# SelfTest/Protocol/test_KDF.py: Self-test for key derivation functions
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
import unittest
from binascii import unhexlify
from Crypto.Util.py3compat import *
from Crypto.SelfTest.st_common import list_test_cases
from Crypto.Hash import SHA1, HMAC, SHA256
from Crypto.Cipher import AES, DES3
from Crypto.Protocol.KDF import PBKDF1, PBKDF2, _S2V, HKDF, scrypt
def t2b(t):
    """Decode a hex string (spaces/newlines allowed) into bytes.

    ``None`` is passed through unchanged so optional test-vector fields
    (e.g. an absent salt) survive conversion.
    """
    if t is None:
        return None
    compact = t.replace(" ", "").replace("\n", "")
    return unhexlify(b(compact))
class TestVector(object):
    # Plain attribute container for named test-vector fields
    # (populated dynamically, e.g. in scrypt_Tests.setUp).
    pass
class PBKDF1_Tests(unittest.TestCase):
    """Known-answer test for PBKDF1 with SHA-1."""

    # List of tuples with test data.
    # Each tuple is made up by:
    #       Item #0: a pass phrase
    #       Item #1: salt (8 bytes encoded in hex)
    #       Item #2: output key length
    #       Item #3: iterations to use
    #       Item #4: expected result (encoded in hex)
    _testData = (
            # From http://www.di-mgt.com.au/cryptoKDFs.html#examplespbkdf
            ("password","78578E5A5D63CB06",16,1000,"DC19847E05C64D2FAF10EBFB4A3D2A20"),
            )

    def test1(self):
        """Derive a key from the single vector and compare to the reference."""
        v = self._testData[0]
        res = PBKDF1(v[0], t2b(v[1]), v[2], v[3], SHA1)
        self.assertEqual(res, t2b(v[4]))
class PBKDF2_Tests(unittest.TestCase):
    """Known-answer tests for PBKDF2 (HMAC-SHA1 and HMAC-SHA256 PRFs)."""

    # List of tuples with test data.
    # Each tuple is made up by:
    #       Item #0: a pass phrase
    #       Item #1: salt (encoded in hex)
    #       Item #2: output key length
    #       Item #3: iterations to use
    #       Item #4: expected result (encoded in hex)
    _testData = (
            # From http://www.di-mgt.com.au/cryptoKDFs.html#examplespbkdf
            ("password","78578E5A5D63CB06",24,2048,"BFDE6BE94DF7E11DD409BCE20A0255EC327CB936FFE93643"),
            # From RFC 6050
            ("password","73616c74", 20, 1, "0c60c80f961f0e71f3a9b524af6012062fe037a6"),
            ("password","73616c74", 20, 2, "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957"),
            ("password","73616c74", 20, 4096, "4b007901b765489abead49d926f721d065a429c1"),
            ("passwordPASSWORDpassword","73616c7453414c5473616c7453414c5473616c7453414c5473616c7453414c5473616c74",
                25, 4096, "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038"),
            ( 'pass\x00word',"7361006c74",16,4096, "56fa6aa75548099dcc37d7f03425e0c3"),
            )

    def test1(self):
        """Check vectors against both the default PRF and an explicit one."""
        # Test only for HMAC-SHA1 as PRF
        def prf(p,s):
            return HMAC.new(p,s,SHA1).digest()

        for i in range(len(self._testData)):
            v = self._testData[i]
            # Default PRF and explicit HMAC-SHA1 PRF must agree.
            res = PBKDF2(v[0], t2b(v[1]), v[2], v[3])
            res2 = PBKDF2(v[0], t2b(v[1]), v[2], v[3], prf)
            self.assertEqual(res, t2b(v[4]))
            self.assertEqual(res, res2)

    def test2(self):
        """From draft-josefsson-scrypt-kdf-01, Chapter 10"""
        output_1 = t2b("""
            55 ac 04 6e 56 e3 08 9f ec 16 91 c2 25 44 b6 05
            f9 41 85 21 6d de 04 65 e6 8b 9d 57 c2 0d ac bc
            49 ca 9c cc f1 79 b6 45 99 16 64 b3 9d 77 ef 31
            7c 71 b8 45 b1 e3 0b d5 09 11 20 41 d3 a1 97 83
            """)
        output_2 = t2b("""
            4d dc d8 f6 0b 98 be 21 83 0c ee 5e f2 27 01 f9
            64 1a 44 18 d0 4c 04 14 ae ff 08 87 6b 34 ab 56
            a1 d4 25 a1 22 58 33 54 9a db 84 1b 51 c9 b3 17
            6a 27 2b de bb a1 d0 78 47 8f 62 b3 97 f3 3c 8d
            """)
        prf_hmac_sha256 = lambda p, s: HMAC.new(p, s, SHA256).digest()
        output = PBKDF2(b("passwd"), b("salt"), 64, 1, prf=prf_hmac_sha256)
        self.assertEqual(output, output_1)
        output = PBKDF2(b("Password"), b("NaCl"), 64, 80000, prf=prf_hmac_sha256)
        self.assertEqual(output, output_2)
class S2V_Tests(unittest.TestCase):
    """Tests for the S2V construction used by SIV mode (RFC 5297)."""

    # Sequence of test vectors.
    # Each test vector is made up by:
    #   Item #0: a tuple of strings
    #   Item #1: an AES key
    #   Item #2: the result
    #   Item #3: the cipher module S2V is based on
    # Everything is hex encoded
    _testData = [

            # RFC5297, A.1
            (
             (  '101112131415161718191a1b1c1d1e1f2021222324252627',
                '112233445566778899aabbccddee' ),
            'fffefdfcfbfaf9f8f7f6f5f4f3f2f1f0',
            '85632d07c6e8f37f950acd320a2ecc93',
            AES
            ),

            # RFC5297, A.2
            (
             (  '00112233445566778899aabbccddeeffdeaddadadeaddadaffeeddcc'+
                'bbaa99887766554433221100',
                '102030405060708090a0',
                '09f911029d74e35bd84156c5635688c0',
                '7468697320697320736f6d6520706c61'+
                '696e7465787420746f20656e63727970'+
                '74207573696e67205349562d414553'),
            '7f7e7d7c7b7a79787776757473727170',
            '7bdb6e3b432667eb06f4d14bff2fbd0f',
            AES
            ),
    ]

    def test1(self):
        """Verify correctness of test vector"""
        for tv in self._testData:
            s2v = _S2V.new(t2b(tv[1]), tv[3])
            # Feed every component string, then derive the final vector.
            for s in tv[0]:
                s2v.update(t2b(s))
            result = s2v.derive()
            self.assertEqual(result, t2b(tv[2]))

    def test2(self):
        """Verify that no more than 127(AES) and 63(TDES)
        components are accepted."""
        key = bchr(0) * 8 + bchr(255) * 8
        for module in (AES, DES3):
            s2v = _S2V.new(key, module)
            # Maximum number of components is block_size*8 - 1.
            max_comps = module.block_size*8-1
            for i in range(max_comps):
                s2v.update(b("XX"))
            # One more component must be rejected.
            self.assertRaises(TypeError, s2v.update, b("YY"))
class HKDF_Tests(unittest.TestCase):
    """Known-answer tests for HKDF (RFC 5869, Appendix A)."""

    # Test vectors from RFC5869, Appendix A
    # Each tuple is made up by:
    #       Item #0: hash module
    #       Item #1: secret
    #       Item #2: salt
    #       Item #3: context
    #       Item #4: key length
    #       Item #5: expected result
    _test_vector = (
            (
                SHA256,
                "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
                "000102030405060708090a0b0c",
                "f0f1f2f3f4f5f6f7f8f9",
                42,
                "3cb25f25faacd57a90434f64d0362f2a" +
                "2d2d0a90cf1a5a4c5db02d56ecc4c5bf" +
                "34007208d5b887185865"
            ),
            (
                SHA256,
                "000102030405060708090a0b0c0d0e0f" +
                "101112131415161718191a1b1c1d1e1f" +
                "202122232425262728292a2b2c2d2e2f" +
                "303132333435363738393a3b3c3d3e3f" +
                "404142434445464748494a4b4c4d4e4f",
                "606162636465666768696a6b6c6d6e6f" +
                "707172737475767778797a7b7c7d7e7f" +
                "808182838485868788898a8b8c8d8e8f" +
                "909192939495969798999a9b9c9d9e9f" +
                "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
                "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" +
                "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" +
                "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf" +
                "e0e1e2e3e4e5e6e7e8e9eaebecedeeef" +
                "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
                82,
                "b11e398dc80327a1c8e7f78c596a4934" +
                "4f012eda2d4efad8a050cc4c19afa97c" +
                "59045a99cac7827271cb41c65e590e09" +
                "da3275600c2f09b8367793a9aca3db71" +
                "cc30c58179ec3e87c14c01d5c1f3434f" +
                "1d87"
            ),
            (
                SHA256,
                "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
                None,
                None,
                42,
                "8da4e775a563c18f715f802a063c5a31" +
                "b8a11f5c5ee1879ec3454e5f3c738d2d" +
                "9d201395faa4b61a96c8"
            ),
            (
                SHA1,
                "0b0b0b0b0b0b0b0b0b0b0b",
                "000102030405060708090a0b0c",
                "f0f1f2f3f4f5f6f7f8f9",
                42,
                "085a01ea1b10f36933068b56efa5ad81" +
                "a4f14b822f5b091568a9cdd4f155fda2" +
                "c22e422478d305f3f896"
            ),
            (
                SHA1,
                "000102030405060708090a0b0c0d0e0f" +
                "101112131415161718191a1b1c1d1e1f" +
                "202122232425262728292a2b2c2d2e2f" +
                "303132333435363738393a3b3c3d3e3f" +
                "404142434445464748494a4b4c4d4e4f",
                "606162636465666768696a6b6c6d6e6f" +
                "707172737475767778797a7b7c7d7e7f" +
                "808182838485868788898a8b8c8d8e8f" +
                "909192939495969798999a9b9c9d9e9f" +
                "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
                "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" +
                "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" +
                "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf" +
                "e0e1e2e3e4e5e6e7e8e9eaebecedeeef" +
                "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
                82,
                "0bd770a74d1160f7c9f12cd5912a06eb" +
                "ff6adcae899d92191fe4305673ba2ffe" +
                "8fa3f1a4e5ad79f3f334b3b202b2173c" +
                "486ea37ce3d397ed034c7f9dfeb15c5e" +
                "927336d0441f4c4300e2cff0d0900b52" +
                "d3b4"
            ),
            (
                SHA1,
                "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
                "",
                "",
                42,
                "0ac1af7002b3d761d1e55298da9d0506" +
                "b9ae52057220a306e07b6b87e8df21d0" +
                "ea00033de03984d34918"
            ),
            (
                SHA1,
                "0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c",
                None,
                "",
                42,
                "2c91117204d745f3500d636a62f64f0a" +
                "b3bae548aa53d423b0d1f27ebba6f5e5" +
                "673a081d70cce7acfc48"
            )
        )

    def test1(self):
        """Check every RFC 5869 vector against a single-key derivation."""
        for tv in self._test_vector:
            secret, salt, info, exp = [ t2b(tv[x]) for x in (1,2,3,5) ]
            key_len, hashmod = [ tv[x] for x in (4,0) ]
            output = HKDF(secret, key_len, salt, hashmod, 1, info)
            self.assertEqual(output, exp)

    def test2(self):
        """Splitting the output into N keys must concatenate to the 1-key case."""
        ref = HKDF(b("XXXXXX"), 12, b("YYYY"), SHA1)

        # Same output, but this time split over 2 keys
        key1, key2 = HKDF(b("XXXXXX"), 6, b("YYYY"), SHA1, 2)
        self.assertEqual((ref[:6], ref[6:]), (key1, key2))

        # Same output, but this time split over 3 keys
        key1, key2, key3 = HKDF(b("XXXXXX"), 4, b("YYYY"), SHA1, 3)
        self.assertEqual((ref[:4], ref[4:8], ref[8:]), (key1, key2, key3))
class scrypt_Tests(unittest.TestCase):
    """Tests for the scrypt password-based key derivation function."""

    # Test vectors taken from
    # http://tools.ietf.org/html/draft-josefsson-scrypt-kdf-00
    # Each tuple is (password, salt, N, r, p, expected output as hex dump).
    data = (
        (
            "",
            "",
            16, # 2K
            1,
            1,
            """
            77 d6 57 62 38 65 7b 20 3b 19 ca 42 c1 8a 04 97
            f1 6b 48 44 e3 07 4a e8 df df fa 3f ed e2 14 42
            fc d0 06 9d ed 09 48 f8 32 6a 75 3a 0f c8 1f 17
            e8 d3 e0 fb 2e 0d 36 28 cf 35 e2 0c 38 d1 89 06
            """
        ),
        (
            "password",
            "NaCl",
            1024, # 1M
            8,
            16,
            """
            fd ba be 1c 9d 34 72 00 78 56 e7 19 0d 01 e9 fe
            7c 6a d7 cb c8 23 78 30 e7 73 76 63 4b 37 31 62
            2e af 30 d9 2e 22 a3 88 6f f1 09 27 9d 98 30 da
            c7 27 af b9 4a 83 ee 6d 83 60 cb df a2 cc 06 40
            """
        ),
        (
            "pleaseletmein",
            "SodiumChloride",
            16384, # 16M
            8,
            1,
            """
            70 23 bd cb 3a fd 73 48 46 1c 06 cd 81 fd 38 eb
            fd a8 fb ba 90 4f 8e 3e a9 b5 43 f6 54 5d a1 f2
            d5 43 29 55 61 3f 0f cf 62 d4 97 05 24 2a 9a f9
            e6 1e 85 dc 0d 65 1e 40 df cf 01 7b 45 57 58 87
            """
        ),
        (
            "pleaseletmein",
            "SodiumChloride",
            1048576, # 1G
            8,
            1,
            """
            21 01 cb 9b 6a 51 1a ae ad db be 09 cf 70 f8 81
            ec 56 8d 57 4a 2f fd 4d ab e5 ee 98 20 ad aa 47
            8e 56 fd 8f 4b a5 d0 9f fa 1c 6d 92 7c 40 f4 c3
            37 30 40 49 e8 a9 52 fb cb f4 5c 6f a7 7a 41 a4
            """
        ),
    )

    def setUp(self):
        """Convert the raw tuples in ``data`` into TestVector objects,
        deriving ``dkLen`` from the length of the expected output."""
        new_test_vectors = []
        for tv in self.data:
            new_tv = TestVector()
            new_tv.P = b(tv[0])
            new_tv.S = b(tv[1])
            new_tv.N = tv[2]
            new_tv.r = tv[3]
            new_tv.p = tv[4]
            new_tv.output = t2b(tv[5])
            new_tv.dkLen = len(new_tv.output)
            new_test_vectors.append(new_tv)
        self.data = new_test_vectors

    # NOTE: the leading underscore keeps unittest from collecting this test;
    # it exercises the internal _scryptROMix primitive directly.
    def _test1(self):
        b_input = t2b("""
        f7 ce 0b 65 3d 2d 72 a4 10 8c f5 ab e9 12 ff dd
        77 76 16 db bb 27 a7 0e 82 04 f3 ae 2d 0f 6f ad
        89 f6 8f 48 11 d1 e8 7b cc 3b d7 40 0a 9f fd 29
        09 4f 01 84 63 95 74 f3 9a e5 a1 31 52 17 bc d7
        89 49 91 44 72 13 bb 22 6c 25 b5 4d a8 63 70 fb
        cd 98 43 80 37 46 66 bb 8f fc b5 bf 40 c2 54 b0
        67 d2 7c 51 ce 4a d5 fe d8 29 c9 0b 50 5a 57 1b
        7f 4d 1c ad 6a 52 3c da 77 0e 67 bc ea af 7e 89
        """)
        b_output = t2b("""
        79 cc c1 93 62 9d eb ca 04 7f 0b 70 60 4b f6 b6
        2c e3 dd 4a 96 26 e3 55 fa fc 61 98 e6 ea 2b 46
        d5 84 13 67 3b 99 b0 29 d6 65 c3 57 60 1f b4 26
        a0 b2 f4 bb a2 00 ee 9f 0a 43 d1 9b 57 1a 9c 71
        ef 11 42 e6 5d 5a 26 6f dd ca 83 2c e5 9f aa 7c
        ac 0b 9c f1 be 2b ff ca 30 0d 01 ee 38 76 19 c4
        ae 12 fd 44 38 f2 03 a0 e4 e1 c4 7e c3 14 86 1f
        4e 90 87 cb 33 39 6a 68 73 e8 f9 d2 53 9a 4b 8e
        """)
        from Crypto.Protocol.KDF import _scryptROMix
        output = _scryptROMix(b_input, 16)
        self.assertEqual(output, b_output)

    def test2(self):
        """Run scrypt against the reference vectors (small N only)."""
        for tv in self.data:
            # TODO: add runtime flag to enable test vectors
            # with humongous memory usage
            if tv.N > 100000:
                continue
            output = scrypt(tv.P, tv.S, tv.dkLen, tv.N, tv.r, tv.p)
            self.assertEqual(output, tv.output)

    def test3(self):
        """Splitting the derived material over N keys must reproduce the
        single-key output."""
        ref = scrypt(b("password"), b("salt"), 12, 16, 1, 1)
        # Same output, but this time split over 2 keys
        key1, key2 = scrypt(b("password"), b("salt"), 6, 16, 1, 1, 2)
        self.assertEqual((ref[:6], ref[6:]), (key1, key2))
        # Same output, but this time split over 3 keys
        key1, key2, key3 = scrypt(b("password"), b("salt"), 4, 16, 1, 1, 3)
        self.assertEqual((ref[:4], ref[4:8], ref[8:]), (key1, key2, key3))
def get_tests(config={}):
    """Collect all KDF-related test cases into one flat list."""
    test_classes = (
        PBKDF1_Tests,
        PBKDF2_Tests,
        S2V_Tests,
        HKDF_Tests,
        scrypt_Tests,
    )
    tests = []
    for klass in test_classes:
        tests.extend(list_test_cases(klass))
    return tests
if __name__ == '__main__':
    # PEP 8 (E731): use a def instead of binding a lambda to a name.
    # unittest.main resolves the 'suite' name in this module's namespace.
    def suite():
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4
| 36.68 | 115 | 0.530231 |
e59be86d39a4d880eecd3b3712d1c0f4ec986453 | 788 | py | Python | models/player.py | KDerec/chesstournamentmanager | 2b57d2703d654e4ffc3c44293a031bd596463ba0 | [
"MIT"
] | null | null | null | models/player.py | KDerec/chesstournamentmanager | 2b57d2703d654e4ffc3c44293a031bd596463ba0 | [
"MIT"
] | null | null | null | models/player.py | KDerec/chesstournamentmanager | 2b57d2703d654e4ffc3c44293a031bd596463ba0 | [
"MIT"
] | null | null | null | """Define player object."""
class Player:
    """A chess player with identity information and a ranking."""

    # Mapping from sexe code (as stored on instances) to its display label.
    possible_sexe = {0: "Homme", 1: "Femme"}

    def __init__(self, last_name, first_name, birthday, sexe, rank):
        """Initiate player object.

        Parameters
        ----------
        last_name, first_name : str
            Player identity.
        birthday : str
            Birth date (format decided by the caller).
        sexe : int
            Key into ``possible_sexe`` (0 = "Homme", 1 = "Femme").
        rank : int
            Current ranking.
        """
        self.last_name = last_name
        self.first_name = first_name
        self.birthday = birthday
        self.sexe = sexe
        self.rank = rank

    def update_player_rank(self, new_rank):
        """Update player rank attribute."""
        self.rank = new_rank

    def __repr__(self):
        # Added for debuggability; does not affect existing callers.
        return (f"{type(self).__name__}(last_name={self.last_name!r}, "
                f"first_name={self.first_name!r}, rank={self.rank!r})")
class DictToPlayer(Player):
    """Build a :class:`Player` from a dictionary of attributes."""

    def __init__(self, dict, last_name=False, first_name=False, birthday=False, sexe=False, rank=False):
        """Initialise the base player, then copy every key/value pair of
        ``dict`` onto the instance.

        NOTE(review): the parameter name ``dict`` shadows the builtin; it is
        kept unchanged for backward compatibility with keyword callers.
        """
        super().__init__(last_name, first_name, birthday, sexe, rank)
        for key, value in dict.items():
            setattr(self, key, value)
| 29.185185 | 104 | 0.629442 |
c7f1fb72d5840ea5196517067f27b1b8707d6c23 | 2,213 | py | Python | nonebot_plugin_epicfree/__init__.py | monsterxcn/nonebot_plugin_epicfree | 5086cd043e486485bd9705e776965c397a4aa45f | [
"MIT"
] | 15 | 2021-08-16T13:16:52.000Z | 2022-03-15T17:13:04.000Z | nonebot_plugin_epicfree/__init__.py | monsterxcn/nonebot_plugin_epicfree | 5086cd043e486485bd9705e776965c397a4aa45f | [
"MIT"
] | 4 | 2021-08-17T01:52:13.000Z | 2022-02-19T14:21:42.000Z | nonebot_plugin_epicfree/__init__.py | monsterxcn/nonebot_plugin_epicfree | 5086cd043e486485bd9705e776965c397a4aa45f | [
"MIT"
] | 7 | 2021-09-19T12:10:34.000Z | 2022-03-13T12:33:44.000Z | import sys
import nonebot
from nonebot import on_regex, require
from nonebot.adapters.cqhttp import Bot, Event, Message
from nonebot.adapters.cqhttp.event import GroupMessageEvent
from nonebot.exception import FinishedException
from nonebot.log import logger
from nonebot.typing import T_State
from .data_source import getEpicFree, subscribeHelper
epicScheduler = nonebot.get_driver().config.epic_scheduler
if not epicScheduler:
epicScheduler = "5 8 8 8"
day_of_week, hour, minute, second = epicScheduler.split(" ")
epicMatcher = on_regex("((E|e)(P|p)(I|i)(C|c))?喜(加一|\+1)", priority=2)
@epicMatcher.handle()
async def onceHandle(bot: Bot, event: Event, state: T_State):
imfree = await getEpicFree()
await epicMatcher.finish(Message(imfree))
epicSubMatcher = on_regex("喜(加一|\+1)(私聊)?订阅", priority=1)
@epicSubMatcher.handle()
async def subHandle(bot: Bot, event: GroupMessageEvent, state: T_State):
if event.sender.role not in ["admin", "owner"] or "私聊" in event.get_plaintext():
# 普通群员只会启用私聊订阅
# state["targetId"] = event.get_user_id()
state["subType"] = "私聊"
else:
# 管理员用户询问需要私聊订阅还是群聊订阅
pass
@epicSubMatcher.got("subType", prompt="默认启用群聊订阅,如需私聊订阅请回复「私聊」")
async def subEpic(bot: Bot, event: GroupMessageEvent, state: T_State):
if "私聊" in state["subType"]:
state["targetId"] = event.get_user_id()
state["subType"] = "私聊"
else:
state["targetId"] = str(event.group_id)
state["subType"] = "群聊"
msg = await subscribeHelper("w", state["subType"], state["targetId"])
await epicSubMatcher.finish(msg)
scheduler = require("nonebot_plugin_apscheduler").scheduler
@scheduler.scheduled_job("cron", day_of_week=day_of_week, hour=hour, minute=minute, second=second)
async def weeklyEpic():
bot = nonebot.get_bot()
whoSubscribe = await subscribeHelper()
imfree = await getEpicFree()
try:
for group in whoSubscribe["群聊"]:
await bot.send_group_msg(group_id=group, message=Message(imfree))
for private in whoSubscribe["私聊"]:
await bot.send_private_msg(user_id=private, message=Message(imfree))
except FinishedException:
pass
except Exception as e:
logger.error("Epic 限免游戏资讯定时任务出错:" + str(sys.exc_info()[0]) + "\n" + str(e))
| 33.530303 | 98 | 0.727519 |
d81f885306678bf8cf6e47d04c6b68e422d7c59a | 4,130 | py | Python | tests/test_controlflow_pytorch.py | c4dt/mlbench-core | 8a5cf6e00ff4535b2aea23b213241858a5ee5f00 | [
"Apache-2.0"
] | null | null | null | tests/test_controlflow_pytorch.py | c4dt/mlbench-core | 8a5cf6e00ff4535b2aea23b213241858a5ee5f00 | [
"Apache-2.0"
] | null | null | null | tests/test_controlflow_pytorch.py | c4dt/mlbench-core | 8a5cf6e00ff4535b2aea23b213241858a5ee5f00 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `mlbench_core.controlflow.pytorch` package."""
import itertools
import random
import pytest
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from mlbench_core.controlflow.pytorch.helpers import (
convert_dtype,
iterate_dataloader,
maybe_range,
)
from mlbench_core.controlflow.pytorch.train_validation import TrainValidation
from mlbench_core.evaluation.pytorch.metrics import TopKAccuracy
from mlbench_core.lr_scheduler.pytorch import multistep_learning_rates_with_warmup
@pytest.fixture
def model():
    """Tiny linear model (1 input feature -> 2 logits) shared by the tests."""
    return nn.Linear(in_features=1, out_features=2)
@pytest.fixture
def optimizer(model):
    """Plain SGD over the fixture model's parameters."""
    sgd = optim.SGD(model.parameters(), lr=0.1)
    return sgd
@pytest.fixture
def loss_function():
    """Cross-entropy criterion for the 2-class toy problem."""
    criterion = nn.CrossEntropyLoss()
    return criterion
@pytest.fixture
def metrics():
    """Single evaluation metric: top-1 accuracy."""
    top1 = TopKAccuracy(topk=1)
    return [top1]
@pytest.fixture
def scheduler(optimizer):
    """Multi-step LR schedule with a 2-epoch warmup.

    The positional arguments are presumably (world_size, lr, gamma,
    milestones) — confirm against mlbench_core.lr_scheduler.pytorch.
    """
    return multistep_learning_rates_with_warmup(
        optimizer,
        1,
        0.1,
        0.1,
        [5, 10],
        warmup_duration=2,
        warmup_linear_scaling=False,
        warmup_lr=0.2,
    )
def test_instantiation(mocker, model, optimizer, loss_function, metrics, scheduler):
    """TrainValidation can be constructed with mocked distributed backend."""
    # Patch out torch.distributed so no process group is required.
    mocker.patch("mlbench_core.controlflow.pytorch.train_validation.dist")
    batch_size = 2
    # Positional args after batch_size look like (epochs, rank, world_size,
    # checkpoint freq?, dtype) — confirm against TrainValidation's signature.
    tv = TrainValidation(
        model,
        optimizer,
        loss_function,
        metrics,
        scheduler,
        batch_size,
        10,
        0,
        1,
        1,
        "fp32",
    )
    assert tv is not None
def test_training(mocker, model, optimizer, loss_function, metrics, scheduler):
    """End-to-end run on a noisy, linearly separable toy problem."""
    # Patch out everything that talks to torch.distributed or the dashboard.
    mocker.patch("mlbench_core.controlflow.pytorch.train_validation.dist")
    mocker.patch("mlbench_core.utils.pytorch.distributed.dist")
    mocker.patch("mlbench_core.utils.tracker.LogMetrics")
    batch_size = 2
    tv = TrainValidation(
        model,
        optimizer,
        loss_function,
        metrics,
        scheduler,
        batch_size,
        10,
        0,
        1,
        1,
        "fp32",
    )
    # Label is 1 iff n > 0.5, with the label flipped ~10% of the time;
    # the input is n rescaled from [0, 1] to [-25, 25].
    train_set = [random.random() for _ in range(100)]
    train_set = [
        (
            torch.FloatTensor([n * 50 - 25]),
            1 if (n > 0.5) != (random.random() < 0.1) else 0,
        )
        for n in train_set
    ]
    test_set = [random.random() for _ in range(10)]
    test_set = [
        (
            torch.FloatTensor([n * 50 - 25]),
            1 if (n > 0.5) != (random.random() < 0.1) else 0,
        )
        for n in test_set
    ]
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
    tv.run(
        dataloader_train_fn=lambda: train_loader,
        dataloader_val_fn=lambda: test_loader,
        repartition_per_epoch=True,
    )
    # All 10 epochs ran, a best epoch was recorded, and accuracy beats
    # chance (50%) despite the 10% label noise.
    assert tv.tracker.current_epoch == 10
    assert tv.tracker.best_epoch > -1
    assert tv.tracker.best_metric_value > 50.0
def test_maybe_range():
    """maybe_range(n) yields range(n); maybe_range(None) counts forever."""
    bounded = maybe_range(10)
    assert bounded == range(10)
    assert len(bounded) == 10
    unbounded = maybe_range(None)
    assert isinstance(unbounded, itertools.count)
    assert [next(unbounded), next(unbounded)] == [0, 1]
def test_convert_dtype():
    """convert_dtype maps dtype names to torch dtypes and rejects unknown names."""
    t = torch.IntTensor([0])
    tt = convert_dtype("fp32", t)
    assert tt.dtype == torch.float32
    tt2 = convert_dtype("fp64", t)
    assert tt2.dtype == torch.float64
    # Fix (flake8 F841): the result was assigned to an unused variable; the
    # call can never return because it must raise.
    with pytest.raises(NotImplementedError):
        convert_dtype("int", t)
def test_iterate_dataloader(mocker):
    """iterate_dataloader converts both inputs and targets to the requested
    dtype while preserving values."""
    # Two (input, target) int batches; both should come back as float32.
    dataloader = [
        (torch.IntTensor([0]), torch.IntTensor([1])),
        (torch.IntTensor([2]), torch.IntTensor([3])),
    ]
    it = iterate_dataloader(
        dataloader, "fp32", max_batch_per_epoch=2, transform_target_type=True
    )
    first = next(it)
    assert first[0].dtype == torch.float32
    assert first[1].dtype == torch.float32
    assert first[0].data.item() == 0.0
    assert first[1].item() == 1.0
    second = next(it)
    assert second[0].dtype == torch.float32
    assert second[1].dtype == torch.float32
    assert second[0].data.item() == 2.0
    assert second[1].item() == 3.0
| 22.445652 | 84 | 0.634867 |
193bea4afb2dfe35041a4ece121f8d686b4b3f7d | 9,111 | py | Python | p2p/state.py | theresume/py-evm | c7f982e9832ea91312f456cfdd5be7c867853d0b | [
"MIT"
] | null | null | null | p2p/state.py | theresume/py-evm | c7f982e9832ea91312f456cfdd5be7c867853d0b | [
"MIT"
] | 4 | 2018-12-07T21:32:48.000Z | 2019-02-22T15:25:01.000Z | p2p/state.py | theresume/py-evm | c7f982e9832ea91312f456cfdd5be7c867853d0b | [
"MIT"
] | null | null | null | import asyncio
import logging
import time
from typing import ( # noqa: F401
Any,
cast,
Dict,
List,
Set,
)
import rlp
from trie.sync import HexaryTrieSync
from trie.exceptions import SyncRequestAlreadyProcessed
from eth_keys import datatypes # noqa: F401
from eth_utils import (
decode_hex,
encode_hex,
keccak,
)
from evm.constants import (
BLANK_ROOT_HASH,
EMPTY_SHA3,
)
from evm.db.backends.base import BaseDB
from evm.rlp.accounts import Account
from p2p import eth
from p2p.cancel_token import CancelToken
from p2p.exceptions import OperationCancelled
from p2p.peer import BasePeer, ETHPeer, PeerPool, PeerPoolSubscriber
class StateDownloader(PeerPoolSubscriber):
    """Downloads a full state trie from peers, starting at ``root_hash``.

    Subscribes to a PeerPool, requests trie nodes in batches, feeds replies
    into a ``StateSync`` scheduler and re-requests nodes that time out.
    """
    logger = logging.getLogger("p2p.state.StateDownloader")
    # Maps a node key to the time.time() at which its request was sent.
    _pending_nodes = {} # type: Dict[Any, float]
    _total_processed_nodes = 0
    _report_interval = 10 # Number of seconds between progress reports.
    # TODO: Experiment with different timeout/max_pending values to find the combination that
    # yields the best results.
    # FIXME: Should use the # of peers times MAX_STATE_FETCH here
    _max_pending = 5 * eth.MAX_STATE_FETCH
    _reply_timeout = 10 # seconds
    # For simplicity/readability we use 0 here to force a report on the first iteration of the
    # loop.
    _last_report_time = 0
    def __init__(self, state_db: BaseDB, root_hash: bytes, peer_pool: PeerPool) -> None:
        """Subscribe to ``peer_pool`` and build the trie-sync scheduler."""
        self.peer_pool = peer_pool
        self.peer_pool.subscribe(self)
        self.root_hash = root_hash
        self.scheduler = StateSync(root_hash, state_db, self.logger)
        self._running_peers = set() # type: Set[ETHPeer]
        self.cancel_token = CancelToken('StateDownloader')
    def register_peer(self, peer: BasePeer) -> None:
        """PeerPoolSubscriber hook: start a msg-handling task for the peer."""
        asyncio.ensure_future(self.handle_peer(cast(ETHPeer, peer)))
    async def handle_peer(self, peer: ETHPeer) -> None:
        """Handle the lifecycle of the given peer."""
        self._running_peers.add(peer)
        try:
            await self._handle_peer(peer)
        finally:
            self._running_peers.remove(peer)
    async def _handle_peer(self, peer: ETHPeer) -> None:
        """Consume sub-protocol messages from ``peer`` until cancelled."""
        while True:
            try:
                cmd, msg = await peer.read_sub_proto_msg(self.cancel_token)
            except OperationCancelled:
                # Either our cancel token or the peer's has been triggered, so break out of the
                # loop.
                break
            if isinstance(cmd, eth.NodeData):
                self.logger.debug("Processing NodeData with %d entries", len(msg))
                for node in msg:
                    self._total_processed_nodes += 1
                    node_key = keccak(node)
                    try:
                        self.scheduler.process([(node_key, node)])
                    except SyncRequestAlreadyProcessed:
                        # This means we received a node more than once, which can happen when we
                        # retry after a timeout.
                        pass
                    # A node may be received more than once, so pop() with a default value.
                    self._pending_nodes.pop(node_key, None)
            else:
                # It'd be very convenient if we could ignore everything that is not a NodeData
                # when doing a StateSync, but need to double check because peers may consider that
                # "Bad Form" and disconnect from us.
                self.logger.debug("Ignoring %s(%s) while doing a StateSync", cmd, msg)
    async def stop(self):
        """Trigger cancellation and wait for all peer tasks to finish."""
        self.cancel_token.trigger()
        self.peer_pool.unsubscribe(self)
        while self._running_peers:
            self.logger.debug("Waiting for %d running peers to finish", len(self._running_peers))
            await asyncio.sleep(0.1)
    async def request_next_batch(self):
        """Ask the scheduler for the next batch and request it from a peer."""
        requests = self.scheduler.next_batch(eth.MAX_STATE_FETCH)
        if not requests:
            # Although our run() loop frequently yields control to let our msg handler process
            # received nodes (scheduling new requests), there may be cases when the pending nodes
            # take a while to arrive thus causing the scheduler to run out of new requests for a
            # while.
            self.logger.debug("Scheduler queue is empty, not requesting any nodes")
            return
        self.logger.debug("Requesting %d trie nodes", len(requests))
        await self.request_nodes([request.node_key for request in requests])
    async def request_nodes(self, node_keys: List[bytes]) -> None:
        """Send a GetNodeData request for ``node_keys`` to a random peer and
        record the request time for timeout tracking."""
        # FIXME: Need a better criteria to select peers here.
        peer = await self.peer_pool.get_random_peer()
        now = time.time()
        for node_key in node_keys:
            self._pending_nodes[node_key] = now
        cast(ETHPeer, peer).sub_proto.send_get_node_data(node_keys)
    async def retry_timedout(self):
        """Re-request every pending node older than ``_reply_timeout``."""
        timed_out = []
        now = time.time()
        for node_key, req_time in list(self._pending_nodes.items()):
            if now - req_time > self._reply_timeout:
                timed_out.append(node_key)
        if not timed_out:
            return
        self.logger.debug("Re-requesting %d trie nodes", len(timed_out))
        await self.request_nodes(timed_out)
    async def run(self):
        """Main loop: keep requesting/retrying until the sync completes or
        the cancel token is triggered."""
        self.logger.info("Starting state sync for root hash %s", encode_hex(self.root_hash))
        while self.scheduler.has_pending_requests and not self.cancel_token.triggered:
            # Request new nodes if we haven't reached the limit of pending nodes.
            if len(self._pending_nodes) < self._max_pending:
                await self.request_next_batch()
            # Retry pending nodes that timed out.
            if self._pending_nodes:
                await self.retry_timedout()
            if len(self._pending_nodes) > self._max_pending:
                # Slow down if we've reached the limit of pending nodes.
                self.logger.debug("Pending trie nodes limit reached, sleeping a bit")
                await asyncio.sleep(0.3)
            else:
                # Yield control to ensure the Peer's msg_handler callback is called to process any
                # nodes we may have received already. Otherwise we spin too fast and don't process
                # received nodes often enough.
                await asyncio.sleep(0)
            self._maybe_report_progress()
        self.logger.info("Finished state sync with root hash %s", encode_hex(self.root_hash))
    def _maybe_report_progress(self):
        """Log progress counters at most once per ``_report_interval``."""
        if (time.time() - self._last_report_time) >= self._report_interval:
            self._last_report_time = time.time()
            self.logger.info("Nodes processed: %d", self._total_processed_nodes)
            self.logger.info(
                "Nodes requested but not received yet: %d", len(self._pending_nodes))
            self.logger.info(
                "Nodes scheduled but not requested yet: %d", len(self.scheduler.requests))
class StateSync(HexaryTrieSync):
    """Trie sync that also schedules storage tries and contract code found
    in account leaves."""
    def leaf_callback(self, data, parent):
        """Decode an account leaf and schedule its storage root / code hash
        for download when they are non-empty."""
        # TODO: Need to figure out why geth uses 64 as the depth here, and then document it.
        depth = 64
        account = rlp.decode(data, sedes=Account)
        if account.storage_root != BLANK_ROOT_HASH:
            self.schedule(account.storage_root, parent, depth, leaf_callback=None)
        if account.code_hash != EMPTY_SHA3:
            # Code is raw bytes, not a trie node, hence is_raw=True.
            self.schedule(account.code_hash, parent, depth, leaf_callback=None, is_raw=True)
def _test():
    """Manual integration entry point: sync Ropsten state into a LevelDB.

    Run with ``-db <path> -root-hash <hex>``; stops on SIGINT/SIGTERM.
    """
    import argparse
    import signal
    from p2p import ecies
    from p2p.peer import HardCodedNodesPeerPool
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash', type=str, required=True, help='Hex encoded root hash')
    args = parser.parse_args()
    # An in-memory chain DB with only the genesis header is enough for the
    # peer handshake.
    chaindb = FakeAsyncChainDB(MemoryDB())
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    peer_pool = HardCodedNodesPeerPool(
        ETHPeer, chaindb, RopstenChain.network_id, ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())
    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)
    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()
    loop.run_until_complete(run())
    loop.close()
if __name__ == "__main__":
_test()
| 40.493333 | 98 | 0.654593 |
25011f8965c028ab518c068411ced150bbce5a3f | 46,409 | py | Python | klearn/model_selection/metrics_depr.py | KevinLiao159/klearn | ffc0cb6b69cd21f2aac8934af55ac6e32c4db689 | [
"MIT"
] | 1 | 2019-05-03T08:20:57.000Z | 2019-05-03T08:20:57.000Z | klearn/model_selection/metrics_depr.py | KevinLiao159/klearn | ffc0cb6b69cd21f2aac8934af55ac6e32c4db689 | [
"MIT"
] | null | null | null | klearn/model_selection/metrics_depr.py | KevinLiao159/klearn | ffc0cb6b69cd21f2aac8934af55ac6e32c4db689 | [
"MIT"
] | null | null | null | """
Metrics to assess performance on classification task given class prediction
Functions named as *_score return a scalar value to maximize: the higher
the better
Function named as *_error or *_loss return a scalar value to minimize:
the lower the better
"""
# Authors: Kevin Liao
import numpy as np
import math
from sklearn.metrics.classification import _weighted_sum
from sklearn.metrics import (accuracy_score, f1_score, precision_score,
recall_score, roc_auc_score, log_loss)
from sklearn.metrics import mean_squared_error
from gravity_learn.utils import force_array
import warnings
warnings.warn("This module was deprecated. All scores and metrics "
"are moved to model_selection.metrics",
DeprecationWarning)
__all__ = ('classification_error',
'long_error',
'short_error',
'short_precision_score',
'short_recall_score',
'top_bottom_accuracy_score',
'top_bottom_error',
'top_bottom_long_error',
'top_bottom_short_error',
'top_bottom_precision_score',
'top_bottom_recall_score',
'top_bottom_short_precision_score',
'top_bottom_short_recall_score',
'top_bottom_f1_score',
'top_bottom_roc_auc_score',
'top_bottom_log_loss',
'root_mean_squared_error',
'mean_absolute_percentage_error')
# --------------------------------------------------
# Classification metrics
# --------------------------------------------------
def _select_top_and_bottom(y_true, y_score,
percentile=10, interpolation='midpoint'):
"""
Select truth values, predictions, scores of the top and bottom observations
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, ]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, 2]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
Returns
-------
y_true_ext : array, shape = [n_samples] or [n_samples, ]
True binary labels in binary label indicators of top and bottom
y_score_ext : array, shape = [n_samples] or [n_samples, 2]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions of top and bottom.
y_pred_ext : array, shape = [n_samples] or [n_samples, ]
Target prediction, can either be 1 or 0, top is always 1 and bottom\
is always 0.
"""
y_true = force_array(y_true)
y_score = force_array(y_score)
upperQ = np.percentile(y_score[:, 1], q=(100-percentile),
interpolation=interpolation)
lowerQ = np.percentile(y_score[:, 1], q=percentile,
interpolation=interpolation)
top_bottom_filter = (y_score[:, 1] >= upperQ) | (y_score[:, 1] <= lowerQ)
y_true_ext = y_true[top_bottom_filter]
y_score_ext = y_score[top_bottom_filter]
y_pred_ext = y_score_ext[:, 1] >= 0.5
return y_true_ext, y_score_ext, y_pred_ext
def classification_error(y_true, y_pred,
                         normalize=True, sample_weight=None):
    """
    Compute classification error (1 - accuracy).

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If False, return the number of misclassified samples.
        Otherwise, return the fraction of misclassified samples.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    error : float
        If normalize == True, return the fraction of misclassified samples
        (float), else it returns the number of misclassified samples (int).
    """
    # Fix: pass options by keyword — scikit-learn made all arguments after
    # (y_true, y_pred) keyword-only in recent versions, so the original
    # positional call breaks there.
    return 1 - accuracy_score(y_true, y_pred,
                              normalize=normalize,
                              sample_weight=sample_weight)
def long_error(y_true, y_pred, normalize=True, sample_weight=None):
    """
    Error of long classification. False negative rate (FNR).

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels; 1 marks the long class.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If False, return the number of misclassified long samples.
        Otherwise, return the fraction of misclassified long samples.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights, aligned with ``y_true``.

    Returns
    -------
    error : float
        If normalize == True, return the fraction of misclassified long
        samples (float), else the count (int). The best performance is 0.
    """
    long_mask = np.asarray(y_true) == 1
    long_true = y_true[y_true == 1]
    long_pred = y_pred[y_true == 1]
    if sample_weight is not None:
        # Bug fix: the weights must be restricted to the long samples too;
        # the original passed the full-length vector to _weighted_sum, which
        # fails on the length mismatch with the filtered score array.
        sample_weight = np.asarray(sample_weight)[long_mask]
    score = long_pred != long_true
    return _weighted_sum(score, sample_weight, normalize)
def short_error(y_true, y_pred, normalize=True, sample_weight=None):
    """
    Error of short classification. False positive rate (FPR).

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels; 0 marks the short class.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If False, return the number of misclassified short samples.
        Otherwise, return the fraction of misclassified short samples.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights, aligned with ``y_true``.

    Returns
    -------
    error : float
        If normalize == True, return the fraction of misclassified short
        samples (float), else the count (int). The best performance is 0.
    """
    short_mask = np.asarray(y_true) == 0
    short_true = y_true[y_true == 0]
    short_pred = y_pred[y_true == 0]
    if sample_weight is not None:
        # Bug fix: restrict the weights to the short samples; the original
        # passed the full-length vector to _weighted_sum, which fails on the
        # length mismatch with the filtered score array.
        sample_weight = np.asarray(sample_weight)[short_mask]
    score = short_pred != short_true
    return _weighted_sum(score, sample_weight, normalize)
def short_precision_score(y_true, y_pred,
                          average='binary', sample_weight=None):
    """
    Precision of the short (negative, label 0) class.

    Thin wrapper around :func:`sklearn.metrics.precision_score` with
    ``pos_label=0`` so that the short class is the one being scored.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    average : string, default 'binary'
        Averaging mode, forwarded unchanged to ``precision_score``
        (one of None, 'binary', 'micro', 'macro', 'samples', 'weighted').

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float
        Precision of the negative class in binary classification, or the
        averaged per-class precision for multiclass tasks.
    """
    return precision_score(
        y_true,
        y_pred,
        labels=None,
        pos_label=0,
        average=average,
        sample_weight=sample_weight,
    )
def short_recall_score(y_true, y_pred, average='binary', sample_weight=None):
    """
    Recall of the short (negative, label 0) class — the true negative
    rate (TNR), a.k.a. specificity (SPC).

    Thin wrapper around :func:`sklearn.metrics.recall_score` with
    ``pos_label=0`` so that the short class is the one being scored.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    average : string, default 'binary'
        Averaging mode, forwarded unchanged to ``recall_score``
        (one of None, 'binary', 'micro', 'macro', 'samples', 'weighted').

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    recall : float (if average is not None) or array of float
        Recall of the negative class in binary classification, or the
        averaged per-class recall for multiclass tasks.
    """
    return recall_score(
        y_true,
        y_pred,
        labels=None,
        pos_label=0,
        average=average,
        sample_weight=sample_weight,
    )
def top_bottom_accuracy_score(y_true, y_score,
                              percentile=10, interpolation='midpoint',
                              normalize=True, sample_weight=None):
    """
    Accuracy restricted to the top/bottom ``percentile`` scored samples.

    Observations whose positive-class score falls in the top or bottom
    ``percentile`` band are selected with ``_select_top_and_bottom``; the
    accuracy is then computed on that subset only.

    Parameters
    ----------
    y_true : 1d array-like
        Ground truth (correct) labels.

    y_score : array, shape = [n_samples, 2]
        Per-class scores; column 1 is the positive-class score.

    percentile : float, default 10, with 0 <= percentile <= 100
        The top and bottom quantile(s) to select.

    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Quantile interpolation method, forwarded to ``np.percentile``.

    normalize : bool, optional (default=True)
        If False, return the number of correctly classified selected
        samples; otherwise the fraction.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        The best performance is 1 with normalize == True and the number
        of selected samples with normalize == False.
    """
    selected_true, _, selected_pred = _select_top_and_bottom(
        y_true, y_score, percentile, interpolation)
    return accuracy_score(selected_true, selected_pred, normalize, sample_weight)
def top_bottom_error(y_true, y_score,
                     percentile=10, interpolation='midpoint',
                     normalize=True, sample_weight=None):
    """Classification error restricted to the top and bottom percentile.

    Observations are ranked by ``y_score``; only the top and bottom
    ``percentile`` are retained and the classification error is computed
    on that subset.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or non-thresholded decision values.
    percentile : float, default 10, with 0 <= percentile <= 100.
        The top and bottom quantile(s) to select from all true values.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        How to interpolate when the desired quantile lies between two
        data points i and j (linear, lower=i, higher=j, nearest,
        midpoint=(i + j) / 2).
    normalize : bool, optional (default=True)
        If False, return the number of misclassified samples;
        otherwise return the fraction of misclassified samples.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    error : float
        Fraction (or, with ``normalize=False``, count) of misclassified
        samples among the selected observations. Best value is 0.
    """
    selection = _select_top_and_bottom(y_true, y_score,
                                       percentile, interpolation)
    true_subset, _, pred_subset = selection
    return classification_error(true_subset, pred_subset,
                                normalize, sample_weight)
def top_bottom_long_error(y_true, y_score,
                          percentile=10, interpolation='midpoint',
                          normalize=True, sample_weight=None):
    """Long-class classification error on top and bottom percentile rows.

    Observations are ranked by ``y_score``; only the top and bottom
    ``percentile`` are retained and the long-class error is computed on
    that subset.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or non-thresholded decision values.
    percentile : float, default 10, with 0 <= percentile <= 100.
        The top and bottom quantile(s) to select from all true values.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        How to interpolate when the desired quantile lies between two
        data points i and j (linear, lower=i, higher=j, nearest,
        midpoint=(i + j) / 2).
    normalize : bool, optional (default=True)
        If False, return the number of misclassified samples;
        otherwise return the fraction of misclassified samples.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    error : float
        Fraction (or, with ``normalize=False``, count) of misclassified
        long-class samples among the selected observations. Best is 0.
    """
    selection = _select_top_and_bottom(y_true, y_score,
                                       percentile, interpolation)
    true_subset, _, pred_subset = selection
    return long_error(true_subset, pred_subset, normalize, sample_weight)
def top_bottom_short_error(y_true, y_score,
                           percentile=10, interpolation='midpoint',
                           normalize=True, sample_weight=None):
    """Short-class classification error on top and bottom percentile rows.

    Observations are ranked by ``y_score``; only the top and bottom
    ``percentile`` are retained and the short-class error is computed on
    that subset.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or non-thresholded decision values.
    percentile : float, default 10, with 0 <= percentile <= 100.
        The top and bottom quantile(s) to select from all true values.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        How to interpolate when the desired quantile lies between two
        data points i and j (linear, lower=i, higher=j, nearest,
        midpoint=(i + j) / 2).
    normalize : bool, optional (default=True)
        If False, return the number of misclassified samples;
        otherwise return the fraction of misclassified samples.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    error : float
        Fraction (or, with ``normalize=False``, count) of misclassified
        short-class samples among the selected observations. Best is 0.
    """
    selection = _select_top_and_bottom(y_true, y_score,
                                       percentile, interpolation)
    true_subset, _, pred_subset = selection
    return short_error(true_subset, pred_subset, normalize, sample_weight)
def top_bottom_precision_score(y_true, y_score,
                               percentile=10, interpolation='midpoint',
                               average='binary', sample_weight=None):
    """Precision of the positive class on top and bottom percentile rows.

    Observations are ranked by ``y_score``; only the top and bottom
    ``percentile`` are retained. Precision is tp / (tp + fp): the
    classifier's ability not to label a negative sample as positive.
    Best value is 1, worst is 0.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or non-thresholded decision values.
    percentile : float, default 10, with 0 <= percentile <= 100.
        The top and bottom quantile(s) to select from all true values.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        How to interpolate when the desired quantile lies between two
        data points i and j (linear, lower=i, higher=j, nearest,
        midpoint=(i + j) / 2).
    average : string, [None, 'binary' (default), 'micro', 'macro',
        'samples', 'weighted']
        Required for multiclass/multilabel targets. If None, per-class
        scores are returned. Otherwise determines the averaging:
        'binary' reports the pos_label class only (binary targets);
        'micro' counts global tp/fp/fn; 'macro' is the unweighted mean
        over labels; 'weighted' weights the mean by support; 'samples'
        averages per instance (multilabel only).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float,
        shape = [n_unique_labels]
        Top and bottom precision of the positive class in binary
        classification, or averaged per-class precision for the
        multiclass task.
    """
    selection = _select_top_and_bottom(y_true, y_score,
                                       percentile, interpolation)
    true_subset, _, pred_subset = selection
    # pos_label=1: score the positive ("long") class.
    return precision_score(y_true=true_subset, y_pred=pred_subset,
                           pos_label=1, average=average,
                           sample_weight=sample_weight)
def top_bottom_recall_score(y_true, y_score,
                            percentile=10, interpolation='midpoint',
                            average='binary', sample_weight=None):
    """
    Compute the recall of top and bottom observations
    The recall is the ratio tp / (tp + fn) where tp is the number of
    true positives and fn the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive
    samples.
    The best value is 1 and the worst value is 0.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or non-thresholded measure of decisions
        (as returned by "decision_function" on some classifiers).
    percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
        the top and bottom quantile(s) to select from all true values
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        New in version 0.18.0.
        This optional parameter specifies the interpolation method to use,\
        when the desired quantile lies between two data points i and j:
        linear: i + (j - i) * fraction, where fraction is the fractional part\
        of the index surrounded by i and j.
        lower: i.
        higher: j.
        nearest: i or j whichever is nearest.
        midpoint: (i + j) / 2.
    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
              'weighted']
        This parameter is required for multiclass/multilabel targets.
        If None, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        'binary':
        Only report results for the class specified by pos_label.
        This is applicable only if targets (y_{true,pred}) are binary.
        'micro':
        Calculate metrics globally by counting the total true positives,
        false negatives and false positives.
        'macro':
        Calculate metrics for each label, and find their unweighted
        mean. This does not take label imbalance into account.
        'weighted':
        Calculate metrics for each label, and find their average, weighted
        by support (the number of true instances for each label). This
        alters 'macro' to account for label imbalance; it can result in an
        F-score that is not between precision and recall.
        'samples':
        Calculate metrics for each instance, and find their average (only
        meaningful for multilabel classification where this differs from
        :func:`accuracy_score`).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Top and bottom recall of the positive class in binary classification \
        or weighted average of the recall of each class for \
        the multiclass task.
    """
    y_true_ext, y_score_ext, y_pred_ext =\
        _select_top_and_bottom(y_true, y_score, percentile, interpolation)
    # pos_label=1: recall of the positive ("long") class.
    return recall_score(y_true=y_true_ext, y_pred=y_pred_ext,
                        pos_label=1, average=average,
                        sample_weight=sample_weight)
def top_bottom_short_precision_score(y_true, y_score,
                                     percentile=10, interpolation='midpoint',
                                     average='binary', sample_weight=None):
    """Precision of the negative class on top and bottom percentile rows.

    Observations are ranked by ``y_score``; only the top and bottom
    ``percentile`` are retained. Precision is tp / (tp + fp), computed
    here with the negative ("short") class treated as the positive label.
    Best value is 1, worst is 0.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or non-thresholded decision values.
    percentile : float, default 10, with 0 <= percentile <= 100.
        The top and bottom quantile(s) to select from all true values.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        How to interpolate when the desired quantile lies between two
        data points i and j (linear, lower=i, higher=j, nearest,
        midpoint=(i + j) / 2).
    average : string, [None, 'binary' (default), 'micro', 'macro',
        'samples', 'weighted']
        Required for multiclass/multilabel targets. If None, per-class
        scores are returned. Otherwise determines the averaging:
        'binary' reports the pos_label class only (binary targets);
        'micro' counts global tp/fp/fn; 'macro' is the unweighted mean
        over labels; 'weighted' weights the mean by support; 'samples'
        averages per instance (multilabel only).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float,
        shape = [n_unique_labels]
        Top and bottom precision of the negative class in binary
        classification, or averaged per-class precision for the
        multiclass task.
    """
    selection = _select_top_and_bottom(y_true, y_score,
                                       percentile, interpolation)
    true_subset, _, pred_subset = selection
    # pos_label=0: score the negative ("short") class.
    return precision_score(y_true=true_subset, y_pred=pred_subset,
                           pos_label=0, average=average,
                           sample_weight=sample_weight)
def top_bottom_short_recall_score(y_true, y_score,
                                  percentile=10, interpolation='midpoint',
                                  average='binary', sample_weight=None):
    """
    Compute the recall of top and bottom of short observations
    The recall is the ratio tp / (tp + fn) where tp is the number of
    true positives and fn the number of false negatives, computed here
    with the negative ("short") class treated as the positive label.
    The recall is intuitively the ability of the classifier to find all
    the samples of that class.
    The best value is 1 and the worst value is 0.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or non-thresholded measure of decisions
        (as returned by "decision_function" on some classifiers).
    percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
        the top and bottom quantile(s) to select from all true values
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        New in version 0.18.0.
        This optional parameter specifies the interpolation method to use,\
        when the desired quantile lies between two data points i and j:
        linear: i + (j - i) * fraction, where fraction is the fractional part\
        of the index surrounded by i and j.
        lower: i.
        higher: j.
        nearest: i or j whichever is nearest.
        midpoint: (i + j) / 2.
    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
              'weighted']
        This parameter is required for multiclass/multilabel targets.
        If None, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        'binary':
        Only report results for the class specified by pos_label.
        This is applicable only if targets (y_{true,pred}) are binary.
        'micro':
        Calculate metrics globally by counting the total true positives,
        false negatives and false positives.
        'macro':
        Calculate metrics for each label, and find their unweighted
        mean. This does not take label imbalance into account.
        'weighted':
        Calculate metrics for each label, and find their average, weighted
        by support (the number of true instances for each label). This
        alters 'macro' to account for label imbalance; it can result in an
        F-score that is not between precision and recall.
        'samples':
        Calculate metrics for each instance, and find their average (only
        meaningful for multilabel classification where this differs from
        :func:`accuracy_score`).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Top and bottom recall of the negative class in binary \
        classification or weighted average of the recall of each class \
        for the multiclass task.
    """
    y_true_ext, y_score_ext, y_pred_ext =\
        _select_top_and_bottom(y_true, y_score, percentile, interpolation)
    # pos_label=0: recall of the negative ("short") class.
    return recall_score(y_true=y_true_ext, y_pred=y_pred_ext,
                        pos_label=0, average=average,
                        sample_weight=sample_weight)
def top_bottom_f1_score(y_true, y_score,
                        percentile=10, interpolation='midpoint',
                        average='binary', sample_weight=None):
    """F1 score computed on the top and bottom percentile observations.

    Observations are ranked by ``y_score``; only the top and bottom
    ``percentile`` are retained. The F1 score is the harmonic mean of
    precision and recall::

        F1 = 2 * (precision * recall) / (precision + recall)

    It reaches its best value at 1 and worst at 0. In the multi-class
    and multi-label case this is the averaged per-class F1.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or non-thresholded decision values.
    percentile : float, default 10, with 0 <= percentile <= 100.
        The top and bottom quantile(s) to select from all true values.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        How to interpolate when the desired quantile lies between two
        data points i and j (linear, lower=i, higher=j, nearest,
        midpoint=(i + j) / 2).
    average : string, [None, 'binary' (default), 'micro', 'macro',
        'samples', 'weighted']
        Required for multiclass/multilabel targets. If None, per-class
        scores are returned. Otherwise determines the averaging:
        'binary' reports the pos_label class only (binary targets);
        'micro' counts global tp/fp/fn; 'macro' is the unweighted mean
        over labels; 'weighted' weights the mean by support; 'samples'
        averages per instance (multilabel only).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    f1_score : float or array of float, shape = [n_unique_labels]
        F1 score of the positive class in binary classification or the
        averaged per-class F1 scores for the multiclass task.
    """
    selection = _select_top_and_bottom(y_true, y_score,
                                       percentile, interpolation)
    true_subset, _, pred_subset = selection
    # pos_label=1: F1 of the positive ("long") class.
    return f1_score(y_true=true_subset, y_pred=pred_subset,
                    pos_label=1, average=average,
                    sample_weight=sample_weight)
def top_bottom_roc_auc_score(y_true, y_score,
                             percentile=10, interpolation='midpoint',
                             average='macro', sample_weight=None):
    """
    Compute Area Under the Curve (AUC) from prediction scores
    Note: this implementation is restricted to the binary classification task
    or multilabel classification task in label indicator format.
    Only the top and bottom ``percentile`` observations (ranked by
    ``y_score``) are used.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or non-thresholded measure of decisions
        (as returned by "decision_function" on some classifiers).
    percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
        the top and bottom quantile(s) to select from all true values
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        New in version 0.18.0.
        This optional parameter specifies the interpolation method to use,\
        when the desired quantile lies between two data points i and j:
        linear: i + (j - i) * fraction, where fraction is the fractional part\
        of the index surrounded by i and j.
        lower: i.
        higher: j.
        nearest: i or j whichever is nearest.
        midpoint: (i + j) / 2.
    average : string, [None, 'micro', 'macro' (default), 'samples', \
              'weighted']
        This parameter is required for multilabel targets.
        If None, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        'micro':
        Calculate metrics globally by counting the total true positives,
        false negatives and false positives.
        'macro':
        Calculate metrics for each label, and find their unweighted
        mean. This does not take label imbalance into account.
        'weighted':
        Calculate metrics for each label, and find their average, weighted
        by support (the number of true instances for each label).
        'samples':
        Calculate metrics for each instance, and find their average (only
        meaningful for multilabel classification where this differs from
        :func:`accuracy_score`).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    auc : float
        Area under the ROC curve computed on the selected top and bottom
        observations.
    """
    y_true_ext, y_score_ext, y_pred_ext =\
        _select_top_and_bottom(y_true, y_score, percentile, interpolation)
    # HACK: y_score_ext[:, 1] assumes y_score_ext holds per-class
    # probabilities with the positive class in column 1 — presumably the
    # shape returned by _select_top_and_bottom; verify against it.
    # TODO: need to handle pos_label in _binary_check
    return roc_auc_score(y_true=y_true_ext, y_score=y_score_ext[:, 1],
                         average=average, sample_weight=sample_weight)
def top_bottom_log_loss(y_true, y_score,
                        percentile=10, interpolation='midpoint',
                        eps=1e-15, normalize=True, sample_weight=None):
    """Log loss (cross-entropy) on the top and bottom percentile rows.

    This is the loss function used in (multinomial) logistic regression
    and extensions of it such as neural networks, defined as the negative
    log-likelihood of the true labels given a probabilistic classifier's
    predictions. The log loss is only defined for two or more labels.
    For a single sample with true label yt in {0,1} and
    estimated probability yp that yt = 1, the log loss is

        -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))

    Only the top and bottom ``percentile`` observations (ranked by
    ``y_score``) are used.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or non-thresholded measure of decisions
        (as returned by "decision_function" on some classifiers).
    percentile : float, default 10 (10% quantile), 0 <= percentile <= 100,
        the top and bottom quantile(s) to select from all true values
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method to use when the desired quantile lies
        between two data points i and j:
        linear: i + (j - i) * fraction, where fraction is the fractional
        part of the index surrounded by i and j.
        lower: i.
        higher: j.
        nearest: i or j whichever is nearest.
        midpoint: (i + j) / 2.
    eps : float
        Log loss is undefined for p=0 or p=1, so probabilities are
        clipped to max(eps, min(1 - eps, p)).
    normalize : bool, optional (default=True)
        If true, return the mean loss per sample.
        Otherwise, return the sum of the per-sample losses.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    y_true_ext, y_score_ext, y_pred_ext =\
        _select_top_and_bottom(y_true, y_score, percentile, interpolation)
    # Log loss must be evaluated on probability estimates, not on hard
    # class predictions: previously y_pred_ext (0/1 labels) was passed,
    # which collapses every probability to 0 or 1 and yields a degenerate,
    # eps-clipped loss. Pass the score/probability matrix instead.
    return log_loss(y_true=y_true_ext, y_pred=y_score_ext,
                    eps=eps, normalize=normalize, sample_weight=sample_weight)
# --------------------------------------------------
# Regression metrics
# --------------------------------------------------
def root_mean_squared_error(y_true, y_pred,
                            sample_weight=None,
                            multioutput='uniform_average'):
    """Root mean squared error regression loss

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average']
        or array-like of shape (n_outputs)
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        'raw_values' :
        Returns a full set of errors in case of multioutput input.
        'uniform_average' :
        Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.
    """
    mse = mean_squared_error(
        y_true=y_true,
        y_pred=y_pred,
        sample_weight=sample_weight,
        multioutput=multioutput
    )
    # np.sqrt handles both the scalar case and the ndarray returned when
    # multioutput='raw_values'; math.sqrt raised a TypeError on arrays,
    # contradicting the documented return type.
    return np.sqrt(mse)
def mean_absolute_percentage_error(y_true, y_pred, robust=False):
    """Mean (or median) absolute percentage error.

    Use case: y is expressed in percent and we want to take pct into
    account. Formula::

        mean_absolute_percentage_error = \
            mean(abs((y_true - y_pred) / y_true)) * 100

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    robust : bool, if True, use the median, otherwise the mean.
        Default is False.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0).
    """
    y_true = force_array(y_true)
    y_pred = force_array(y_pred)
    relative_dev = np.abs((y_true - y_pred) / y_true)
    # median is less sensitive to outliers than the mean
    reducer = np.median if robust else np.mean
    return reducer(relative_dev) * 100
| 40.531878 | 79 | 0.644487 |
8dbe91a94e3019c784d2bef467ed4afc759afe05 | 1,936 | py | Python | portfolio/Python/scrapy/gadgetpanda/fonebank_spider.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/gadgetpanda/fonebank_spider.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/gadgetpanda/fonebank_spider.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | 5 | 2016-03-22T07:40:46.000Z | 2021-05-30T16:12:21.000Z | import os
import csv
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class FoneBankSpider(BaseSpider):
name = 'fonebank.com'
allowed_domains = ['fonebank.com']
def __init__(self, *args, **kwargs):
super(FoneBankSpider, self).__init__(*args, **kwargs)
csv_file = csv.reader(open(os.path.join(HERE, 'fonebank_products.csv')))
self.products =[(row[0], row[1]) for row in csv_file]
def start_requests(self):
for name, url in self.products:
name = name.strip()
yield Request(url)
def parse(self, response):
hxs = HtmlXPathSelector(response)
status = hxs.select('//*[@id="ctl00_ContentPlaceHolder1_div_working"]')
loader = ProductLoader(item=Product(), selector=status)
name = ' '.join(status.select('table/tr/td/span[@id="ctl00_ContentPlaceHolder1_lblmodelname"]/text()').extract())
loader.add_value('name', ' '.join((name, 'Working')))
loader.add_value('url', response.url)
loader.add_xpath('price', 'table/tr/td/div/span[@id="ctl00_ContentPlaceHolder1_lblprice"]/text()')
yield loader.load_item()
status = hxs.select('//*[@id="ctl00_ContentPlaceHolder1_div_not_working"]')
loader = ProductLoader(item=Product(), selector=status)
name = status.select('table/tr/td/span[@id="ctl00_ContentPlaceHolder1_lblnonworkingmodelname"]/text()').extract()[0]
loader.add_value('name', ' '.join((name, 'Not working')))
loader.add_value('url', response.url)
loader.add_xpath('price', 'table/tr/td/div/span[@id="ctl00_ContentPlaceHolder1_lblpricenonworking"]/text()')
yield loader.load_item()
| 44 | 124 | 0.688533 |
a0095bedbd786b2d5e171c2ffeb4b9356d70a6db | 4,698 | py | Python | test/spec/bookshop/sqlalchemy/model_to_dict_spec.py | robyoung/genyrator | 849f2ec83ef6dd9e2e5928cb58f747cc40016f2a | [
"MIT"
] | null | null | null | test/spec/bookshop/sqlalchemy/model_to_dict_spec.py | robyoung/genyrator | 849f2ec83ef6dd9e2e5928cb58f747cc40016f2a | [
"MIT"
] | 9 | 2019-09-13T09:31:55.000Z | 2021-01-11T11:09:17.000Z | test/spec/bookshop/sqlalchemy/model_to_dict_spec.py | robyoung/genyrator | 849f2ec83ef6dd9e2e5928cb58f747cc40016f2a | [
"MIT"
] | 1 | 2020-07-22T15:03:41.000Z | 2020-07-22T15:03:41.000Z | import datetime
import uuid
from expects import expect, have_keys, have_key, equal
from mamba import description, it
from sqlalchemy.orm import joinedload
from bookshop import app
from bookshop.sqlalchemy.model import Book, Author, Genre, BookGenre, Review
from bookshop.sqlalchemy.model_to_dict import model_to_dict
from bookshop import db
# Public identifiers for the fixture rows, generated once so the model rows
# and the expected dicts below stay in sync.
BOOK_UUID = uuid.uuid4()
AUTHOR_UUID = uuid.uuid4()
GENRE_UUID = uuid.uuid4()
REVIEW_UUID = uuid.uuid4()
# Shared timestamps: used both when inserting rows and when asserting output.
datetime_now = datetime.datetime.now()
date_now = datetime.datetime.today().date()
# Author row plus the (partial) dict model_to_dict is expected to emit for it.
author_model = Author(id=1, author_id=AUTHOR_UUID, name='orwell')
author_dict = {
    "id": AUTHOR_UUID,
    "name": 'orwell',
}
# Book row owned by the author above; `id`/`author_id` are internal integer
# keys, while the expected dict exposes the public BOOK_UUID as "id".
book_model = Book(
    id=1,
    book_id=BOOK_UUID,
    name='animal farm',
    rating=4.1,
    author_id=1,
    published=date_now,
    created=datetime_now,
)
book_dict = {
    "id": BOOK_UUID,
    "name": 'animal farm',
    "rating": 4.1,
    "published": date_now,
    "created": datetime_now,
}
genre_model = Genre(
    id=1,
    genre_id=GENRE_UUID,
    title='genre title',
)
# Association row linking book 1 to genre 1.
book_genre_model = BookGenre(
    id=1, book_genre_id=uuid.uuid4(),
    book_id=1, genre_id=1,
)
review_model = Review(id=1, review_id=REVIEW_UUID, text='scathing review - pigs cant talk', book_id=1)
review_dict = {
    'id': REVIEW_UUID,
    'text': 'scathing review - pigs cant talk'
}
# Recreate the schema and seed it once at import time; all specs below read
# this shared database.  NOTE(review): the genre/book_genre rows are added
# later inside the last spec, not here.
with app.app_context():
    db.drop_all()
    db.create_all()
    db.session.add(author_model)
    db.session.add(book_model)
    db.session.add(review_model)
    db.session.commit()
# Mamba specs: each `it` block exercises model_to_dict against the rows
# seeded above and the *_dict expectations.
with description('model_to_dict') as self:
    with it('converts a flat model into a dict'):
        with app.app_context():
            retrieved_book = Book.query.filter_by(book_id=BOOK_UUID).first()
            result = model_to_dict(
                sqlalchemy_model=retrieved_book,
            )
            expect(result).to(have_keys(**book_dict))
    with it('converts a singly-nested relationship'):
        with app.app_context():
            # Hydrate the author relationship and request it via `paths`.
            retrieved_book = Book.query.\
                filter_by(book_id=BOOK_UUID).\
                options(joinedload('author')).\
                first()
            result = model_to_dict(
                sqlalchemy_model=retrieved_book,
                paths=['author'],
            )
            expect(result).to(have_keys(**book_dict))
            expect(result['author']).to(have_keys(**author_dict))
    with it('always converts eager relationships'):
        with app.app_context():
            retrieved_book = Book.query.\
                filter_by(book_id=BOOK_UUID).\
                options(joinedload('author')).\
                first()
            # No `paths` argument: the eagerly-loaded author must still appear.
            result = model_to_dict(
                sqlalchemy_model=retrieved_book
            )
            expect(result).to(have_keys(**book_dict))
            expect(result['author']).to(have_keys(**author_dict))
    with it('always converts eager relationships and retains hydrated paths within that relationship'):
        with app.app_context():
            retrieved_author = Author.query.\
                filter_by(author_id=AUTHOR_UUID).\
                options(joinedload('favourite_book')).\
                first()
            result = model_to_dict(
                sqlalchemy_model=retrieved_author,
                paths=['favourite_book', 'reviews']
            )
            expect(result).to(have_keys(**author_dict))
            expect(result['favourite_book']).to(have_keys(**book_dict))
            expect(result['favourite_book']['reviews'][0]).to(have_keys(**review_dict))
    with it('gives an empty response when relationship does not exist'):
        book_without_author = Book(
            book_id=str(uuid.uuid4()),
            name='',
            rating=0.2,
        )
        with app.app_context():
            db.session.add(book_without_author)
            db.session.commit()
            retrieved_book = Book.query.\
                filter_by(book_id=book_without_author.book_id).\
                first()
            result = model_to_dict(
                sqlalchemy_model=retrieved_book,
                paths=['author'],
            )
            # The requested key is present but maps to None, not omitted.
            expect(result).to(have_key('author'))
            expect(result['author']).to(equal(None))
    with it('converts a deeply nested relationship'):
        with app.app_context():
            db.session.add(genre_model)
            db.session.add(book_genre_model)
            db.session.commit()
            # NOTE(review): variable is named retrieved_genre but this queries
            # Book (with its genre relationship joined in).
            retrieved_genre = Book.query. \
                filter_by(id=1). \
                options(joinedload('genre')). \
                first()
            result = model_to_dict(
                sqlalchemy_model=retrieved_genre,
                paths=['genre'],
            )
            expect(result['genre']['id']).to(equal(GENRE_UUID))
| 31.32 | 103 | 0.607493 |
9e0ab45ee330497689a069eea95fd31ca6edacaf | 1,001 | py | Python | Python/rightSideView.py | alexwu2021/practice | ff786d4d16afdef3e031002d22b58a976c8ed16b | [
"MIT"
] | null | null | null | Python/rightSideView.py | alexwu2021/practice | ff786d4d16afdef3e031002d22b58a976c8ed16b | [
"MIT"
] | 1 | 2021-11-22T05:54:33.000Z | 2021-11-22T05:54:33.000Z | Python/rightSideView.py | alexwu2021/practice | ff786d4d16afdef3e031002d22b58a976c8ed16b | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """LeetCode 199 -- return the values visible from the right side of a
    binary tree, top to bottom (the right-most node of every level)."""

    def rightSideView_mine(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]

        Fixed: the original walked only the right spine (recursing on
        node.right alone), which misses nodes exposed when a left subtree is
        deeper than the right one.  Now does a right-first DFS and records the
        first node seen at each depth -- exactly the right-most visible node.
        """
        def dfs(node, depth, path):
            if not node:
                return
            if depth == len(path):  # first node reached at this depth
                path.append(node.val)
            dfs(node.right, depth + 1, path)  # right first so it wins the level
            dfs(node.left, depth + 1, path)
        ret = []
        dfs(root, 0, ret)
        return ret

    # stefanP's solution.
    # The tricky part is that the left subtree may have levels exposed to the
    # right view (when it is deeper than the right subtree), so the depth has
    # to be tracked instead of just following right children.
    def rightSideView(self, root):
        """Same algorithm, closure style: right-first DFS, keep the first
        value seen per depth."""
        def collect(node, depth):
            if node:
                if depth == len(view):
                    view.append(node.val)
                collect(node.right, depth+1)
                collect(node.left, depth+1)
        view = []
        collect(root, 0)
        return view
| 27.054054 | 130 | 0.509491 |
10e21beef6ac30c4b0b48456e4e5e4f85bec5cf1 | 1,449 | py | Python | situs/jualtiket/models.py | torikwer/django-penj-tiket | 846ef20d74700f798922f2364df7736b5e41bb82 | [
"MIT"
] | null | null | null | situs/jualtiket/models.py | torikwer/django-penj-tiket | 846ef20d74700f798922f2364df7736b5e41bb82 | [
"MIT"
] | null | null | null | situs/jualtiket/models.py | torikwer/django-penj-tiket | 846ef20d74700f798922f2364df7736b5e41bb82 | [
"MIT"
] | null | null | null | import datetime
from django.db import models
from django.utils import timezone
from django.forms import ModelForm
# Create your models here.
class Tiket(models.Model):
    """A movie-ticket listing: film name, stock, showtime, unit price and
    free-text description."""
    nama_film = models.CharField(max_length=200)
    jumlah_tiket = models.PositiveIntegerField(default=0)
    tanggal = models.DateTimeField('tanggal')
    harga_satuan = models.CharField(max_length=7)
    # Fixed: `deskripsi` used to be declared twice (first as a TextField near
    # the top, then re-assigned after the methods).  Only the last assignment
    # ever took effect, so the dead TextField was removed and the effective
    # CharField kept; the resulting field order is unchanged.
    deskripsi = models.CharField(max_length=2000,
                                 null=True)

    def __str__(self):
        return self.nama_film

    def daftar_tiket(self):
        """Return True if the showtime falls within the last 24 hours."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.tanggal <= now
class posisi(models.Model):
    """A seating-row entry attached to a ticket."""
    # Choices: DB value -> human-readable row label (all/front/middle/back).
    posisi_duduk = (
        ('S', 'Semua Deret'),
        ('D', 'Depan'),
        ('T', 'Tengah'),
        ('B', 'Belakang'),
    )
    # Optional link to the ticket; deleting the ticket removes its rows.
    tiket = models.ForeignKey(Tiket,
                            blank = True,
                            null = True,
                            on_delete = models.CASCADE)
    deret_duduk = models.CharField(max_length=1,
                                choices = posisi_duduk)
    def __str__(self):
        return self.deret_duduk
class TiketForm(ModelForm):
    """ModelForm exposing the user-editable fields of Tiket."""
    class Meta:
        model = Tiket
        fields = ['nama_film',
                'jumlah_tiket',
                'deskripsi',
                'tanggal',
                'harga_satuan']
| 27.865385 | 70 | 0.547274 |
d10920f8c29b6d4b5976fc52127c455198206d6e | 1,230 | py | Python | mandelbrot_orbit/mandelbrot_orbit.py | josecelano/mandelbrot-orbit | 4940364dcdd28b9bda3588e333a8664a6e8f4f8e | [
"MIT"
] | null | null | null | mandelbrot_orbit/mandelbrot_orbit.py | josecelano/mandelbrot-orbit | 4940364dcdd28b9bda3588e333a8664a6e8f4f8e | [
"MIT"
] | null | null | null | mandelbrot_orbit/mandelbrot_orbit.py | josecelano/mandelbrot-orbit | 4940364dcdd28b9bda3588e333a8664a6e8f4f8e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""mandelbrot_orbit.mandelbrot_orbit: provides entry point main()."""
__version__ = "1.1.0"
import sys
import matplotlib.pyplot as plt
import mpmath
from mandelbrot_orbit.orbit_calculator import OrbitCalculator
def main():
    """CLI entry point: read (re, im, iterations, output path) from argv,
    compute the orbit of that point and save a plot of the real and
    imaginary components."""
    print("Executing mandelbrot_orbit version %s." % __version__)
    print("List of argument strings: %s" % sys.argv[1:])

    # Positional arguments: real part, imaginary part, iteration count, path.
    real_part, imag_part = sys.argv[1], sys.argv[2]
    iterations = int(sys.argv[3])
    output_path = sys.argv[4]
    print("Generation of orbit for point (", real_part, " , ", imag_part, ") with ", iterations, " iterations ...")

    # Compute the orbit with arbitrary-precision arithmetic.
    point = mpmath.mpc(real=real_part, imag=imag_part)
    reals, imags = OrbitCalculator.generate(point, iterations)

    # Matplotlib needs plain floats.
    steps = range(iterations)
    reals_f = [float(v) for v in reals]
    imags_f = [float(v) for v in imags]

    # Real component in orange, imaginary component in blue.
    plt.plot(steps, reals_f, color="orange", linewidth=1.5)
    plt.plot(steps, imags_f, color="blue", linewidth=1.5)
    plt.grid(True)
    plt.gcf().set_size_inches(12, 4)
    plt.xlim([0, 100])
    plt.savefig(output_path, bbox_inches="tight")
13eb0d53949b2a7a86bda1520934c8c7f597b6e9 | 549 | py | Python | backend/src/access/access_event_publish.py | fjacob21/mididecweb | b65f28eb6fdeafa265796b6190a4264a5eac54ce | [
"MIT"
] | null | null | null | backend/src/access/access_event_publish.py | fjacob21/mididecweb | b65f28eb6fdeafa265796b6190a4264a5eac54ce | [
"MIT"
] | 88 | 2016-11-12T14:54:38.000Z | 2018-08-02T00:25:07.000Z | backend/src/access/access_event_publish.py | mididecouverte/mididecweb | b65f28eb6fdeafa265796b6190a4264a5eac54ce | [
"MIT"
] | null | null | null | from user import USER_ACCESS_SUPER, USER_ACCESS_MANAGER
class EventPublishAccess(object):
    """Decides whether the session's user may publish the given event."""

    def __init__(self, session, event):
        self._session = session
        self._event = event

    def granted(self):
        """Allow super users unconditionally, and managers that own the
        event; anonymous sessions are always rejected."""
        user = self._session.user
        if not user:
            return False
        if user.access == USER_ACCESS_SUPER:
            return True
        owns_event = self._event.owner_id == user.user_id
        return user.access == USER_ACCESS_MANAGER and owns_event
940c3b7c43c650a06eabaf6d76ffe9ecfdcd482a | 1,426 | py | Python | main.py | Keerthi001/PySodium | 761598d8a129ce95a42404898b7f16ddcae568d9 | [
"MIT"
] | 3 | 2020-04-04T20:22:15.000Z | 2021-02-11T13:13:14.000Z | main.py | Keerthi001/PySodium | 761598d8a129ce95a42404898b7f16ddcae568d9 | [
"MIT"
] | 1 | 2020-07-01T14:14:50.000Z | 2020-07-01T16:04:13.000Z | main.py | Keerthi001/PySodium | 761598d8a129ce95a42404898b7f16ddcae568d9 | [
"MIT"
] | null | null | null | import argparse
import os
import random
from typing import Any, List, Tuple, Dict
from types import ModuleType
import numpy as np
import torch
import torch.nn as nn
import torch.optim as module_optimizer
import torch.optim.lr_scheduler as module_scheduler
from torchvision import datasets, transforms
from sodium.utils import setup_logger, load_config, seed_everything
from sodium.trainer import Trainer
import sodium.runner as runner
logger = setup_logger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Train a Sodium Model')
parser.add_argument('-c', '--config', default=None,
type=str, help='config file path (default: None)')
parser.add_argument('--tsai-mode', action='store_true',
help='Enable TSAI Mode')
# parse the arguments
args = parser.parse_args()
# load the config
config = load_config(args.config)
# create a runner
runner = runner.Runner(config)
# setup train parameters
runner.setup_train(tsai_mode=args.tsai_mode)
# print the model summary
runner.print_summary(input_size=(3, 32, 32))
# find lr
runner.find_lr()
# train the network
runner.train(use_bestlr=True)
# plot metrics
runner.plot_metrics()
# plot gradcam
target_layers = ["layer1", "layer2", "layer3", "layer4"]
runner.plot_gradcam(target_layers=target_layers)
| 24.586207 | 74 | 0.706171 |
657860fcda2ee2c536049ee62fffa49fc92bccbd | 1,475 | py | Python | flexget/plugins/output/series_begin.py | sillygreen89/Flexget | 60f24ab0dda7b94c87ba43451921c50c3cef391f | [
"MIT"
] | null | null | null | flexget/plugins/output/series_begin.py | sillygreen89/Flexget | 60f24ab0dda7b94c87ba43451921c50c3cef391f | [
"MIT"
] | null | null | null | flexget/plugins/output/series_begin.py | sillygreen89/Flexget | 60f24ab0dda7b94c87ba43451921c50c3cef391f | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
from flexget.plugins.filter.series import Series, set_series_begin
log = logging.getLogger('set_series_begin')
class SetSeriesBegin(object):
    """
    Set the first episode for series. Uses the entry's ``series_name`` and
    ``series_id`` fields.

    Example::

      set_series_begin: yes
    """
    schema = {'type': 'boolean'}
    def on_task_output(self, task, config):
        # Nothing to do when the plugin is disabled or no entry was accepted.
        if not (config and task.accepted):
            return
        for entry in task.accepted:
            # Only entries carrying both a series name and an episode id can
            # be used to mark a begin point.
            if entry.get('series_name') and entry.get('series_id'):
                fshow = task.session.query(Series).filter(Series.name == entry['series_name']).first()
                if not fshow:
                    # Series not tracked yet: create it on the fly.
                    fshow = Series()
                    fshow.name = entry['series_name']
                    task.session.add(fshow)
                try:
                    set_series_begin(fshow, entry['series_id'])
                except ValueError as e:
                    # NOTE(review): this uses self.log but only a module-level
                    # `log` is defined here -- confirm the plugin framework
                    # injects a logger attribute on plugin instances.
                    self.log.error('An error occurred trying to set begin for %s: %s' % (entry['series_name'], e))
                self.log.info('First episode for "%s" set to %s' % (entry['series_name'], entry['series_id']))
@event('plugin.register')
def register_plugin():
    # Register under the task option name 'set_series_begin' (plugin API v2).
    plugin.register(SetSeriesBegin, 'set_series_begin', api_ver=2)
| 32.777778 | 114 | 0.627119 |
7cbcc6a13c3e16d2b1b7f8f1091ebea548a480a4 | 1,033 | py | Python | pyvisdk/do/dvs_vendor_specific_config.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/dvs_vendor_specific_config.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/dvs_vendor_specific_config.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def DVSVendorSpecificConfig(vim, *args, **kwargs):
    '''This data object type describes vendor specific configuration.

    Creates the managed object via the vim client factory and assigns the
    given positional/keyword arguments to its properties.  ``inherited`` is
    required; ``keyValue``, ``dynamicProperty`` and ``dynamicType`` are
    optional.  Positional arguments fill properties in that order.

    :param vim: connection object exposing ``client.factory.create``
    :raises IndexError: if no argument at all is supplied
    :raises InvalidArgumentError: on an unknown keyword argument
    '''

    obj = vim.client.factory.create('{urn:vim25}DVSVendorSpecificConfig')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 1:
        # Fixed: the message claimed "at least 2" while the check only
        # requires one argument (the single required property 'inherited').
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))

    required = [ 'inherited' ]
    optional = [ 'keyValue', 'dynamicProperty', 'dynamicType' ]

    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
8006385637cc2791306f3e9504353c66747e12e0 | 659 | py | Python | blogtutor/users/tests/test_urls.py | vladdou/blogtutor | 2cf653a2f9a854b488646a86492ce1fe466e19f8 | [
"MIT"
] | null | null | null | blogtutor/users/tests/test_urls.py | vladdou/blogtutor | 2cf653a2f9a854b488646a86492ce1fe466e19f8 | [
"MIT"
] | 4 | 2021-05-12T05:20:53.000Z | 2022-03-22T01:05:33.000Z | blogtutor/users/tests/test_urls.py | vladdou/blogtutor | 2cf653a2f9a854b488646a86492ce1fe466e19f8 | [
"MIT"
] | null | null | null | import pytest
from django.urls import resolve, reverse
from blogtutor.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
| 26.36 | 74 | 0.676783 |
ff1aabdff0d5f29ffc0217e8730359c985e95c31 | 7,350 | py | Python | resources/lib/ui/filmlistUi.py | ahpohl/plugin.video.mediathekview | c52484a267c51b298a71f436eb320d903dc31347 | [
"MIT"
] | 53 | 2018-01-12T12:43:52.000Z | 2022-02-05T19:42:39.000Z | resources/lib/ui/filmlistUi.py | ahpohl/plugin.video.mediathekview | c52484a267c51b298a71f436eb320d903dc31347 | [
"MIT"
] | 176 | 2018-01-09T09:13:31.000Z | 2022-02-13T11:27:47.000Z | resources/lib/ui/filmlistUi.py | ahpohl/plugin.video.mediathekview | c52484a267c51b298a71f436eb320d903dc31347 | [
"MIT"
] | 29 | 2018-01-12T01:42:50.000Z | 2022-02-05T19:42:54.000Z | # -*- coding: utf-8 -*-
"""
The film model UI module
Copyright 2017-2019, Leo Moll and Dominik Schlösser
SPDX-License-Identifier: MIT
"""
import time
import os
from datetime import datetime
from datetime import timedelta
# pylint: disable=import-error
import xbmcgui
import xbmcplugin
import resources.lib.appContext as appContext
from resources.lib.model.film import Film
class FilmlistUi(object):
    """
    The film UI which generates a Kodi film list from database rows.

    Args:
        plugin(MediathekView): the plugin object
        pLongTitle(bool): prefix each entry title with the show name
    """
    def __init__(self, plugin, pLongTitle=True):
        self.logger = appContext.MVLOGGER.get_new_logger('FilmlistUI')
        self.plugin = plugin
        self.handle = plugin.addon_handle
        self.settings = appContext.MVSETTINGS
        self.useLongTitle = pLongTitle
        # define sortmethod for films
        # all av. sort method and put the default sortmethod on first place to be used by UI
        allSortMethods = [
            xbmcplugin.SORT_METHOD_UNSORTED,
            xbmcplugin.SORT_METHOD_TITLE,
            xbmcplugin.SORT_METHOD_DATE,
            xbmcplugin.SORT_METHOD_DATEADDED,
            xbmcplugin.SORT_METHOD_DURATION
        ]
        # Swap the configured default sort method to index 0.
        method = allSortMethods[0]
        allSortMethods[0] = allSortMethods[self.settings.getFilmSortMethod()]
        allSortMethods[self.settings.getFilmSortMethod()] = method
        self.sortmethods = allSortMethods
        # Timing and timezone helpers used when rendering "aired" dates.
        self.startTime = 0
        self.tzDiff = datetime.now() - datetime.utcnow()
        self.tzBase = datetime.fromtimestamp(0)
    def generate(self, databaseRs):
        """Build and publish the Kodi directory from a database result set.

        Expected row layout:
        """
        #
        # 0 - idhash, 1 - title, 2 - showname, 3 - channel,
        # 4 - description, 5 - duration, 6 - aired,
        # 7- url_sub, 8- url_video, 9 - url_video_sd, 10 - url_video_hd
        #
        self.startTime = time.time()
        #
        xbmcplugin.setContent(self.handle, self.settings.getContentType())
        for method in self.sortmethods:
            xbmcplugin.addSortMethod(self.handle, method)
        # Reuse one Film instance; init() re-populates it for every row.
        aFilm = Film()
        listOfElements = []
        for element in databaseRs:
            #
            aFilm.init(element[0], element[1], element[2], element[3], element[4], element[5],
                       element[6], element[7], element[8], element[9], element[10])
            # NOTE(review): _generateListItem returns None for films without
            # any video URL; that would make this unpack raise TypeError.
            (targetUrl, list_item) = self._generateListItem(aFilm)
            #
            list_item.addContextMenuItems(self._generateContextMenu(aFilm))
            # When auto-subtitles are enabled and subtitles exist, route the
            # playback through the plugin's playwithsrt mode instead.
            if self.settings.getAutoSub() and aFilm.url_sub:
                targetUrl = self.plugin.build_url({
                    'mode': "playwithsrt",
                    'id': aFilm.filmid
                })
            #
            listOfElements.append((targetUrl, list_item, False))
        #
        xbmcplugin.addDirectoryItems(
            handle=self.handle,
            items=listOfElements,
            totalItems=len(listOfElements)
        )
        #
        xbmcplugin.endOfDirectory(self.handle, cacheToDisc=False)
        self.plugin.setViewId(self.plugin.resolveViewId('LIST'))
        #
        self.logger.debug('generated: {} sec', time.time() - self.startTime)
    def _generateListItem(self, pFilm):
        """Build (url, xbmcgui.ListItem) for one film; returns None when the
        film has no playable URL at all."""
        # Pick the best available quality: HD (if preferred), then SD, then
        # the default URL.
        videohds = ""
        if (pFilm.url_video_hd != "" and self.settings.getPreferHd()):
            videourl = pFilm.url_video_hd
            videohds = " (HD)"
        elif (pFilm.url_video_sd != ""):
            videourl = pFilm.url_video_sd
        else:
            videourl = pFilm.url_video
        # exit if no url supplied
        if videourl == "":
            return None
        videourl = videourl + self.settings.getUserAgentString()
        if self.useLongTitle:
            resultingtitle = pFilm.show + ': ' + pFilm.title + videohds
        else:
            resultingtitle = pFilm.title + videohds
        info_labels = {
            'title': resultingtitle,
            'sorttitle': resultingtitle.lower(),
            'tvshowtitle': pFilm.show,
            'plot': pFilm.description
        }
        if pFilm.seconds is not None and pFilm.seconds > 0:
            info_labels['duration'] = pFilm.seconds
        if pFilm.aired is not None and pFilm.aired != 0:
            # `aired` is seconds since tzBase (epoch); prepend the air date to
            # the plot via the localized string 30990.
            ndate = self.tzBase + timedelta(seconds=(pFilm.aired))
            airedstring = ndate.isoformat().replace('T', ' ')
            info_labels['date'] = airedstring[:10]
            info_labels['aired'] = airedstring[:10]
            info_labels['dateadded'] = airedstring
            info_labels['plot'] = self.plugin.language(30990).format(airedstring) + info_labels['plot']
        # Channel logo shipped with the addon, e.g. "ard-c.png".
        icon = os.path.join(
            self.plugin.path,
            'resources',
            'icons',
            'sender',
            pFilm.channel.lower() + '-c.png'
        )
        # `offscreen` only exists on Kodi 18+.
        if self.plugin.get_kodi_version() > 17:
            listitem = xbmcgui.ListItem(label=resultingtitle, path=videourl, offscreen=True)
        else:
            listitem = xbmcgui.ListItem(label=resultingtitle, path=videourl)
        #
        listitem.setInfo(type='video', infoLabels=info_labels)
        listitem.setProperty('IsPlayable', 'true')
        listitem.setArt({
            'thumb': icon,
            'icon': icon,
            'banner': icon,
            'fanart': icon,
            'clearart': icon,
            'clearlogo': icon
        })
        return (videourl, listitem)
    def _generateContextMenu(self, pFilm):
        """Build the context-menu entries (label, builtin-call) for one film:
        play-with-subtitles plus movie/episode downloads in SD and HD."""
        contextmenu = []
        if pFilm.url_sub != '':
            # Play with subtitles (localized label 30921).
            contextmenu.append((
                self.plugin.language(30921),
                'PlayMedia({})'.format(
                    self.plugin.build_url({
                        'mode': "playwithsrt",
                        'id': pFilm.filmid
                    })
                )
            ))
        # Download movie
        contextmenu.append((
            self.plugin.language(30922),
            'RunPlugin({})'.format(
                self.plugin.build_url({
                    'mode': "downloadmv",
                    'id': pFilm.filmid,
                    'quality': 1
                })
            )
        ))
        if pFilm.url_video_hd:
            # Download HD movie
            contextmenu.append((
                self.plugin.language(30923),
                'RunPlugin({})'.format(
                    self.plugin.build_url({
                        'mode': "downloadmv",
                        'id': pFilm.filmid,
                        'quality': 2
                    })
                )
            ))
        # Download TV episode
        contextmenu.append((
            self.plugin.language(30924),
            'RunPlugin({})'.format(
                self.plugin.build_url({
                    'mode': "downloadep",
                    'id': pFilm.filmid,
                    'quality': 1
                })
            )
        ))
        if pFilm.url_video_hd:
            # Download HD TV episode
            contextmenu.append((
                self.plugin.language(30925),
                'RunPlugin({})'.format(
                    self.plugin.build_url({
                        'mode': "downloadep",
                        'id': pFilm.filmid,
                        'quality': 2
                    })
                )
            ))
        return contextmenu
9afedf8153aa21a6f8823edaac6d8962f9b747aa | 7,397 | py | Python | prepare_physionet.py | gist-ailab/IITNet-official | 4b863ce75b966cbf444d86a2905ce3b3d4e1055d | [
"MIT"
] | 6 | 2022-03-22T20:33:34.000Z | 2022-03-31T11:00:08.000Z | prepare_physionet.py | gist-ailab/IITNet-official | 4b863ce75b966cbf444d86a2905ce3b3d4e1055d | [
"MIT"
] | null | null | null | prepare_physionet.py | gist-ailab/IITNet-official | 4b863ce75b966cbf444d86a2905ce3b3d4e1055d | [
"MIT"
] | null | null | null | import argparse
import glob
import math
import ntpath
import os
import shutil
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
from datetime import datetime
import numpy as np
from mne import Epochs, pick_types, find_events
from mne.io import concatenate_raws, read_raw_edf
import dhedfreader
# Label values
# Integer codes for the five sleep stages plus an UNKNOWN bucket.
W = 0
N1 = 1
N2 = 2
N3 = 3
REM = 4
UNKNOWN = 5
# Stage name -> integer code.
stage_dict = {
    "W": W,
    "N1": N1,
    "N2": N2,
    "N3": N3,
    "REM": REM,
    "UNKNOWN": UNKNOWN
}
# Integer code -> stage name (inverse of stage_dict).
class_dict = {
    0: "W",
    1: "N1",
    2: "N2",
    3: "N3",
    4: "REM",
    5: "UNKNOWN"
}
# EDF hypnogram annotation string -> integer code.  Note that stages 3 and 4
# are both mapped to N3, and movement time is treated as UNKNOWN.
ann2label = {
    "Sleep stage W": 0,
    "Sleep stage 1": 1,
    "Sleep stage 2": 2,
    "Sleep stage 3": 3,
    "Sleep stage 4": 3,
    "Sleep stage R": 4,
    "Sleep stage ?": 5,
    "Movement time": 5
}
# Length of one scoring epoch in seconds.
EPOCH_SEC_SIZE = 30
def main():
    """Convert Sleep-EDF PSG/Hypnogram EDF pairs into per-subject ``.npz`` files.

    For every ``*PSG.edf``/``*Hypnogram.edf`` pair in ``--data_dir``, the
    selected EEG channel is split into 30-second epochs, labelled from the
    hypnogram annotations, trimmed to the sleep period (plus wake margins)
    and written to ``--output_dir``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="./data",
                        help="File path to the CSV or NPY file that contains walking data.")
    parser.add_argument("--output_dir", type=str, default="./datasets/Sleep-EDF/Fpz-Cz",
                        help="Directory where to save outputs.")
    parser.add_argument("--select_ch", type=str, default="EEG Fpz-Cz",
                        help="File path to the trained model used to estimate walking speeds.")
    args = parser.parse_args()

    # Output dir -- recreated from scratch on every run.
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    else:
        shutil.rmtree(args.output_dir)
        os.makedirs(args.output_dir)

    # Select channel
    select_ch = args.select_ch

    # Read raw and annotation EDF files; sorting keeps PSG/Hypnogram pairs aligned.
    psg_fnames = glob.glob(os.path.join(args.data_dir, "*PSG.edf"))
    ann_fnames = glob.glob(os.path.join(args.data_dir, "*Hypnogram.edf"))
    psg_fnames.sort()
    ann_fnames.sort()
    psg_fnames = np.asarray(psg_fnames)
    ann_fnames = np.asarray(ann_fnames)

    for i in range(len(psg_fnames)):
        raw = read_raw_edf(psg_fnames[i], preload=True, stim_channel=None)
        sampling_rate = raw.info['sfreq']
        raw_ch_df = raw.to_data_frame(scalings=100.0)[select_ch]
        raw_ch_df = raw_ch_df.to_frame()
        raw_ch_df.set_index(np.arange(len(raw_ch_df)))

        # Get raw header
        f = open(psg_fnames[i], 'r', encoding='iso-8859-1')
        reader_raw = dhedfreader.BaseEDFReader(f)
        reader_raw.read_header()
        h_raw = reader_raw.header
        f.close()
        raw_start_dt = datetime.strptime(h_raw['date_time'], "%Y-%m-%d %H:%M:%S")

        # Read annotation and its header
        f = open(ann_fnames[i], 'r', encoding='iso-8859-1')
        reader_ann = dhedfreader.BaseEDFReader(f)
        reader_ann.read_header()
        h_ann = reader_ann.header
        _, _, ann = list(zip(*reader_ann.records()))
        f.close()
        ann_start_dt = datetime.strptime(h_ann['date_time'], "%Y-%m-%d %H:%M:%S")

        # Assert that raw and annotation files start at the same time
        assert raw_start_dt == ann_start_dt

        # Generate labels and collect sample indices to keep/remove.
        remove_idx = []    # sample indices of data that will be removed (UNKNOWN)
        labels = []        # one label per 30-s epoch
        label_idx = []     # sample indices covered by a label
        for a in ann[0]:
            onset_sec, duration_sec, ann_char = a
            ann_str = "".join(ann_char)
            label = ann2label[ann_str]
            if label != UNKNOWN:
                if duration_sec % EPOCH_SEC_SIZE != 0:
                    raise Exception("Something wrong")
                duration_epoch = int(duration_sec / EPOCH_SEC_SIZE)
                # Fixed: `np.int` was removed in NumPy 1.24; use the builtin.
                label_epoch = np.ones(duration_epoch, dtype=int) * label
                labels.append(label_epoch)
                idx = int(onset_sec * sampling_rate) + np.arange(duration_sec * sampling_rate, dtype=int)
                label_idx.append(idx)
                print("Include onset:{}, duration:{}, label:{} ({})".format(
                    onset_sec, duration_sec, label, ann_str
                ))
            else:
                idx = int(onset_sec * sampling_rate) + np.arange(duration_sec * sampling_rate, dtype=int)
                remove_idx.append(idx)
                print("Remove onset:{}, duration:{}, label:{} ({})".format(
                    onset_sec, duration_sec, label, ann_str
                ))
        labels = np.hstack(labels)

        print("before remove unwanted: {}".format(np.arange(len(raw_ch_df)).shape))
        if len(remove_idx) > 0:
            remove_idx = np.hstack(remove_idx)
            select_idx = np.setdiff1d(np.arange(len(raw_ch_df)), remove_idx)
        else:
            select_idx = np.arange(len(raw_ch_df))
        print("after remove unwanted: {}".format(select_idx.shape))

        # Select only the data with labels
        print("before intersect label: {}".format(select_idx.shape))
        label_idx = np.hstack(label_idx)
        select_idx = np.intersect1d(select_idx, label_idx)
        print("after intersect label: {}".format(select_idx.shape))

        # Remove extra index (labels extending past the recorded signal).
        if len(label_idx) > len(select_idx):
            print("before remove extra labels: {}, {}".format(select_idx.shape, labels.shape))
            extra_idx = np.setdiff1d(label_idx, select_idx)
            # Trim the tail
            if np.all(extra_idx > select_idx[-1]):
                n_trims = len(select_idx) % int(EPOCH_SEC_SIZE * sampling_rate)
                # Fixed: when the remaining signal is already a whole number
                # of epochs, n_trims == 0 and `select_idx[:-0]` would wipe the
                # entire array; only trim when there is a partial tail epoch.
                if n_trims > 0:
                    n_label_trims = int(math.ceil(n_trims / (EPOCH_SEC_SIZE * sampling_rate)))
                    select_idx = select_idx[:-n_trims]
                    labels = labels[:-n_label_trims]
            print("after remove extra labels: {}, {}".format(select_idx.shape, labels.shape))

        # Remove movement and unknown stages if any
        raw_ch = raw_ch_df.values[select_idx]

        # Verify that we can split into 30-s epochs
        if len(raw_ch) % (EPOCH_SEC_SIZE * sampling_rate) != 0:
            raise Exception("Something wrong")
        n_epochs = len(raw_ch) / (EPOCH_SEC_SIZE * sampling_rate)

        # Get epochs and their corresponding labels
        x = np.asarray(np.split(raw_ch, n_epochs)).astype(np.float32)[:, :, 0]
        y = labels.astype(np.int32)

        assert len(x) == len(y)

        # Keep only the sleep period plus a margin of wake epochs on each side
        # (30 minutes = 60 thirty-second epochs per side).
        w_edge_mins = 30
        nw_idx = np.where(y != stage_dict["W"])[0]
        start_idx = nw_idx[0] - (w_edge_mins * 2)
        end_idx = nw_idx[-1] + (w_edge_mins * 2)
        if start_idx < 0: start_idx = 0
        if end_idx >= len(y): end_idx = len(y) - 1
        select_idx = np.arange(start_idx, end_idx+1)
        print(("Data before selection: {}, {}".format(x.shape, y.shape)))
        x = x[select_idx]
        y = y[select_idx]
        print(("Data after selection: {}, {}".format(x.shape, y.shape)))

        # Save one npz per subject, keeping both EDF headers for traceability.
        filename = ntpath.basename(psg_fnames[i]).replace("-PSG.edf", ".npz")
        save_dict = {
            "x": x,
            "y": y,
            "fs": sampling_rate,
            "ch_label": select_ch,
            "header_raw": h_raw,
            "header_annotation": h_ann,
        }
        np.savez(os.path.join(args.output_dir, filename), **save_dict)

        print("\n=======================================\n")


if __name__ == "__main__":
    main()
bb9c8b1129d7157d179424792e605a42b5b40d0d | 2,565 | py | Python | src/python/zquantum/optimizers/daemon_optimizer/optimize/_api_test.py | agustinsilva447/z-quantum-optimizers | 6d1fc7d2be1d981e5e18bf4140a5386e14321916 | [
"Apache-2.0"
] | null | null | null | src/python/zquantum/optimizers/daemon_optimizer/optimize/_api_test.py | agustinsilva447/z-quantum-optimizers | 6d1fc7d2be1d981e5e18bf4140a5386e14321916 | [
"Apache-2.0"
] | null | null | null | src/python/zquantum/optimizers/daemon_optimizer/optimize/_api_test.py | agustinsilva447/z-quantum-optimizers | 6d1fc7d2be1d981e5e18bf4140a5386e14321916 | [
"Apache-2.0"
] | null | null | null | from . import optimize_variational_circuit_with_proxy
from zquantum.core.interfaces.mock_objects import MockOptimizer
from .client_mock import MockedClient
import http.client
import unittest
import random
import subprocess
class TestOptimizationServer(unittest.TestCase):
    """Unit tests for optimize_variational_circuit_with_proxy using a mocked
    proxy client (no real server is contacted)."""
    def setUp(self):
        # Dummy connection details; MockedClient never opens a socket.
        self.port = "1234"
        self.ipaddress = "testing-ip"
    def test_optimize_variational_circuit_with_proxy_all_zero_line(self):
        # Given: a cost landscape that is identically zero.
        client = MockedClient(self.ipaddress, self.port)
        params = [0, 0]
        optimizer = MockOptimizer()
        # When
        opt_results = optimize_variational_circuit_with_proxy(params,
            optimizer, client)
        # Then: zero optimum, both params preserved, history populated.
        self.assertEqual(opt_results['opt_value'], 0)
        self.assertEqual(len(opt_results['opt_params']), 2)
        self.assertEqual(opt_results['history'][0]['optimization-evaluation-ids'], ['MOCKED-ID'])
        self.assertIn('value', opt_results['history'][0].keys())
        self.assertIn('params', opt_results['history'][0].keys())
    def test_optimize_variational_circuit_with_proxy_x_squared(self):
        # Given: a cost function f(x) = x^2 served by the mocked client.
        client = MockedClient(self.ipaddress, self.port, "return_x_squared")
        params = [4]
        optimizer = MockOptimizer()
        # When
        opt_results = optimize_variational_circuit_with_proxy(params,
            optimizer, client)
        # Then: optimum is positive and history is populated.
        self.assertGreater(opt_results['opt_value'], 0)
        self.assertEqual(len(opt_results['opt_params']), 1)
        self.assertEqual(opt_results['history'][0]['optimization-evaluation-ids'], ['MOCKED-ID'])
        self.assertIn('value', opt_results['history'][0].keys())
        self.assertIn('params', opt_results['history'][0].keys())
    def test_optimize_variational_circuit_with_proxy_errors(self):
        # Invalid optimizer/client arguments must raise AttributeError.
        client = MockedClient(self.ipaddress, self.port)
        params = [0]
        optimizer = MockOptimizer()
        # self.assertRaises(ValueError, lambda: optimize_variational_circuit_with_proxy(
        #     "Not initial params", optimizer, client))
        self.assertRaises(AttributeError, lambda: optimize_variational_circuit_with_proxy(
            params, "Not an optimizer object", "Not a client"))
        self.assertRaises(AttributeError, lambda: optimize_variational_circuit_with_proxy(
            params, optimizer, "Not a client"))
    @classmethod
    def tearDownClass(self):
        # Clean up files written by the mocked client during the run.
        # NOTE(review): first parameter of a classmethod is conventionally
        # named `cls`, not `self`.
        subprocess.call(["rm", 'client_mock_evaluation_result.json',
            'current_optimization_params.json'])
ae928de3c7d08c4326269cd07bc1167747cf3208 | 386 | py | Python | app/categories/models.py | michel-rodrigues/viggio_backend | f419f0b939209722e1eb1e272f33de172cd5c1f1 | [
"MIT"
] | null | null | null | app/categories/models.py | michel-rodrigues/viggio_backend | f419f0b939209722e1eb1e272f33de172cd5c1f1 | [
"MIT"
] | null | null | null | app/categories/models.py | michel-rodrigues/viggio_backend | f419f0b939209722e1eb1e272f33de172cd5c1f1 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.text import slugify
from utils.base_models import BaseModel
class Category(BaseModel):
    """A content category with a unique, slugified URL identifier."""
    name = models.CharField(max_length=100)
    slug = models.SlugField(unique=True)
    def save(self, *args, **kwargs):
        # Normalise the slug before persisting.
        # NOTE(review): this slugifies the *slug* field, not `name`; confirm
        # callers always pre-fill `slug` (slugify(self.name) may have been
        # intended).
        self.slug = slugify(self.slug)
        super().save(*args, **kwargs)
    def __str__(self):
        return self.name
| 22.705882 | 43 | 0.686528 |
6e2c819818ecd30d1b6304e052a6afddf90b4872 | 1,078 | py | Python | pyclesperanto_prototype/_tier1/_greater_constant.py | haesleinhuepf/pyclesperanto_prototype | 65bc3035d3b2b61a2722c93b95bae310bfbd190e | [
"BSD-3-Clause"
] | 1 | 2021-01-15T15:32:19.000Z | 2021-01-15T15:32:19.000Z | pyclesperanto_prototype/_tier1/_greater_constant.py | haesleinhuepf/pyclesperanto_prototype | 65bc3035d3b2b61a2722c93b95bae310bfbd190e | [
"BSD-3-Clause"
] | null | null | null | pyclesperanto_prototype/_tier1/_greater_constant.py | haesleinhuepf/pyclesperanto_prototype | 65bc3035d3b2b61a2722c93b95bae310bfbd190e | [
"BSD-3-Clause"
] | null | null | null | from .._tier0 import execute
from .._tier0 import Image
from .._tier0 import plugin_function
@plugin_function(categories=['binarize', 'in assistant'])
def greater_constant(source : Image, destination :Image = None, constant :float = 0):
    """Determines pixel wise if an image is greater than a scalar constant.

    f(a, b) = 1 if a > b; 0 otherwise, where b is the scalar `constant`.

    Parameters
    ----------
    source : Image
        input image a
    destination : Image, optional
        binary output image
    constant : Number, optional
        scalar b that every pixel of `source` is compared against (default 0)

    Returns
    -------
    destination

    Examples
    --------
    >>> import pyclesperanto_prototype as cle
    >>> cle.greater_constant(source, destination, constant)

    References
    ----------
    .. [1] https://clij.github.io/clij2-docs/reference_greaterConstant
    """
    parameters = {
        "src1":source,
        "scalar":float(constant),
        "dst":destination
    }
    # Kernel file and entry point are selected by output dimensionality (2D/3D).
    execute(__file__, '../clij-opencl-kernels/kernels/greater_constant_' + str(len(destination.shape)) + 'd_x.cl', 'greater_constant_' + str(len(destination.shape)) + 'd', destination.shape, parameters)
    return destination
| 26.292683 | 202 | 0.62987 |
ff1def97ca09a22fcb18ff866514c2b0b02ba4dd | 11,602 | py | Python | pinball/scheduler/schedule.py | Betterment/pinball | 11120b54fcc25b2857631a5de65a1195ffcffb5c | [
"Apache-2.0"
] | null | null | null | pinball/scheduler/schedule.py | Betterment/pinball | 11120b54fcc25b2857631a5de65a1195ffcffb5c | [
"Apache-2.0"
] | 4 | 2020-08-31T18:11:54.000Z | 2021-06-10T23:18:45.000Z | pinball/scheduler/schedule.py | Betterment/pinball | 11120b54fcc25b2857631a5de65a1195ffcffb5c | [
"Apache-2.0"
] | null | null | null | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of schedule metadata included in schedule tokens.
A Schedule defines when something should run. (The thing is abstract; a
WorkflowSchedule runs workflows.) It has a starting run time, and is
(optionally) repeated periodically. A schedule also has an OverrunPolicy that
defines how it should behave if a previous run didn't finish by the time the
thing is run again through this schedule.
"""
import abc
import datetime
import math
import time
from pinball.config.pinball_config import PinballConfig
from pinball.config.utils import get_log
from pinball.config.utils import timestamp_to_str
from pinball.master.thrift_lib.ttypes import ModifyRequest
from pinball.persistence.token_data import TokenData
from pinball.scheduler.overrun_policy import OverrunPolicy
from pinball.ui.data import Status
from pinball.ui.data_builder import DataBuilder
from pinball.workflow.name import Name
from pinball.workflow.signaller import Signal
from pinball.workflow.signaller import Signaller
from pinball.workflow.utils import load_path
__author__ = 'Pawel Garbacki, Mao Ye'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = ['Pawel Garbacki', 'Mao Ye']
__license__ = 'Apache'
__version__ = '2.0'
LOG = get_log('pinball.scheduler.schedule')
class Schedule(TokenData):
    """Parent class for specialized schedule types.

    Couples the timestamp of the next run with an optional recurrence
    period and an OverrunPolicy describing what to do when the previous
    run is still active at trigger time.
    """
    # Python 2 style abstract base class declaration; subclasses must
    # implement run/is_running/is_failed/abort_running.
    __metaclass__ = abc.ABCMeta
    def __init__(self, next_run_time=None, recurrence_seconds=None,
                 overrun_policy=OverrunPolicy.SKIP):
        """Initialize the schedule.

        Args:
            next_run_time: Epoch timestamp (seconds) of the next run.
            recurrence_seconds: Seconds between consecutive runs.
            overrun_policy: OverrunPolicy value applied when runs overlap.
        """
        self.next_run_time = next_run_time
        self.recurrence_seconds = recurrence_seconds
        self.overrun_policy = overrun_policy
    def advance_next_run_time(self):
        """Advance the scheduled run time beyond the current time."""
        now = time.time()
        if self.next_run_time <= now:
            # Set next run time to the lowest timestamp based off
            # recurrence that is greater than the current time.
            delta_runs = math.ceil((now - self.next_run_time) /
                                   self.recurrence_seconds)
            LOG.info('advancing the next run time now %f '
                     'next_run_time %d recurrence_seconds %d delta_runs %f',
                     now, self.next_run_time,
                     self.recurrence_seconds, delta_runs)
            self.next_run_time += int(delta_runs * self.recurrence_seconds)
            # ceil() lands exactly on `now` when the elapsed gap is an
            # exact multiple of the recurrence; push one more period out.
            if now == self.next_run_time:
                self.next_run_time += self.recurrence_seconds
            assert self.next_run_time > now
    def corresponds_to(self, schedule):
        """Assess correspondence to another schedule.

        Schedules correspond to each other if their next run times are
        shifted by a multiple of recurrence seconds, and all other
        fields are the same.

        Args:
            schedule: The schedule to compare with.
        Returns:
            True iff the schedules correspond to each other.
        """
        if (self.overrun_policy != schedule.overrun_policy or
                self.recurrence_seconds != schedule.recurrence_seconds):
            return False
        delta = self.next_run_time - schedule.next_run_time
        # Force float division (Python 2 compatible) so a fractional
        # offset is detected.
        delta_multiplicator = 1. * delta / self.recurrence_seconds
        return delta_multiplicator == int(delta_multiplicator)
    @abc.abstractmethod
    def run(self, emailer, store):
        """Run the routine pointed to by this schedule."""
        return None
    @abc.abstractmethod
    def is_running(self, store):
        """Checks if the previous run is still active.

        Args:
            store: The store to query for status.
        Returns:
            True iff the run is running.
        """
        return False
    @abc.abstractmethod
    def is_failed(self, store):
        """Checks if the most recent run has failed.

        Args:
            store: The store to query for status.
        Returns:
            True iff the run has failed.
        """
        return False
    @abc.abstractmethod
    def abort_running(self, client, store):
        """Abort all active runs.

        Args:
            client: The client to communicate with the master.
            store: The store to retrieve runs status.
        Returns:
            True iff the workflow has been aborted.
        """
        return False
class WorkflowSchedule(Schedule):
    """Schedule for a workflow.

    Extends Schedule with workflow specific metadata: config parser
    parameters used to (re)generate workflow tokens, warning email
    recipients, and a cap on concurrently running instances.
    """
    def __init__(
            self,
            next_run_time=None,
            recurrence_seconds=None,
            overrun_policy=OverrunPolicy.SKIP,
            parser_params=PinballConfig.PARSER_PARAMS,
            workflow=None,
            emails=None,
            max_running_instances=None):
        Schedule.__init__(self, next_run_time, recurrence_seconds,
                          overrun_policy)
        self.parser_params = parser_params
        self.workflow = workflow
        # Avoid a shared mutable default for the email recipient list.
        self.emails = emails if emails is not None else []
        self.max_running_instances = max_running_instances if max_running_instances \
            else PinballConfig.DEFAULT_MAX_WORKFLOW_RUNNING_INSTANCES
    def __str__(self):
        if self.next_run_time:
            next_run_time = timestamp_to_str(self.next_run_time)
        else:
            next_run_time = str(self.next_run_time)
        if self.recurrence_seconds:
            delta = datetime.timedelta(seconds=self.recurrence_seconds)
            recurrence = str(delta)
        else:
            recurrence = str(self.recurrence_seconds)
        if self.overrun_policy is not None:
            overrun_policy = OverrunPolicy.to_string(self.overrun_policy)
        else:
            overrun_policy = str(self.overrun_policy)
        return ('WorkflowSchedule(next_run_time=%s, recurrence=%s, '
                'overrun_policy=%s, parser_params=%s, workflow=%s, '
                'email=%s, max_running_instances=%s)' % (next_run_time,
                                                         recurrence,
                                                         overrun_policy,
                                                         self.parser_params,
                                                         self.workflow,
                                                         self.emails,
                                                         str(self.max_running_instances)))
    def __repr__(self):
        return self.__str__()
    def advance_next_run_time(self):
        # TODO(pawel): remove after debugging.
        LOG.info('advancing the next run time for workflow %s', self.workflow)
        super(WorkflowSchedule, self).advance_next_run_time()
    def corresponds_to(self, schedule):
        """True iff `schedule` matches this one except for a run-time
        shift by a whole number of recurrence periods."""
        if (self.parser_params != schedule.parser_params or
                self.workflow != schedule.workflow or
                self.emails != schedule.emails or
                self.max_running_instances != schedule.max_running_instances):
            return False
        return super(WorkflowSchedule, self).corresponds_to(schedule)
    def run(self, emailer, store):
        """Generate the tokens for a new instance of the workflow.

        Returns:
            A ModifyRequest carrying the new instance's tokens, or None
            if too many instances are already running or the workflow is
            unknown to the config parser.
        """
        if not self._check_workflow_instances(emailer, self.workflow, store):
            LOG.warn('too many instances running for workflow %s', self.workflow)
            return None
        config_parser = load_path(PinballConfig.PARSER)(self.parser_params)
        workflow_tokens = config_parser.get_workflow_tokens(self.workflow)
        if not workflow_tokens:
            LOG.error('workflow %s not found', self.workflow)
            return None
        result = ModifyRequest()
        result.updates = workflow_tokens
        assert result.updates
        # Derive workflow/instance names from the first token for logging.
        token = result.updates[0]
        name = Name.from_job_token_name(token.name)
        if not name.instance:
            name = Name.from_event_token_name(token.name)
        LOG.info('exporting workflow %s instance %s. Its tokens are under %s',
                 name.workflow, name.instance, name.get_instance_prefix())
        return result
    def is_running(self, store):
        """True iff the workflow's current status in `store` is RUNNING."""
        data_builder = DataBuilder(store, use_cache=True)
        workflow_data = data_builder.get_workflow(self.workflow)
        if not workflow_data:
            return False
        return workflow_data.status == Status.RUNNING
    def is_failed(self, store):
        """True iff the most recent run finished with a non-success status."""
        data_builder = DataBuilder(store, use_cache=True)
        workflow_data = data_builder.get_workflow(self.workflow)
        if not workflow_data:
            return False
        return (workflow_data.status != Status.RUNNING and
                workflow_data.status != Status.SUCCESS)
    def _get_running_instances(self, store):
        """Find running instances of the workflow.

        Args:
            store: The store to query for workflow instance status.
        Returns:
            List of running workflow instance names.
        """
        data_builder = DataBuilder(store, use_cache=True)
        instances = data_builder.get_instances(self.workflow)
        result = []
        for instance in instances:
            if instance.status == Status.RUNNING:
                result.append(instance.instance)
        return result
    def abort_running(self, client, store):
        """Signal ABORT to every running instance.

        Returns False as soon as an ABORT signal cannot be verified.
        """
        running_instances = self._get_running_instances(store)
        for instance in running_instances:
            signaller = Signaller(client,
                                  workflow=self.workflow,
                                  instance=instance)
            signaller.set_action(Signal.ABORT)
            if not signaller.is_action_set(Signal.ABORT):
                return False
        return True
    def _check_workflow_instances(self, emailer, workflow_name, store):
        """Check the number of running instances of the workflow.

        Besides the return value, also sends out a warning email if too
        many instances are running for the given workflow.

        Args:
            emailer: The email sender.
            workflow_name: Name of the workflow.
            store: The store to retrieve runs status.
        Returns:
            False if the running instance count reaches the
            max_running_instances setting; otherwise, True.
        """
        running_instances = self._get_running_instances(store)
        if self.max_running_instances and len(running_instances) >= self.max_running_instances:
            LOG.warn('Too many (%s) instances running for workflow %s !',
                     len(running_instances), workflow_name)
            if emailer:
                emailer.send_too_many_running_instances_warning_message(self.emails,
                                                                        workflow_name,
                                                                        len(running_instances),
                                                                        self.max_running_instances)
            else:
                LOG.warn('Emailer is not set! Failed to send too many instances running warning '
                         'email for workflow %s', workflow_name)
            return False
        return True
a9104599e2e7e2d4d78b2d9cc05901caf933b14e | 51,984 | py | Python | octodns/provider/ns1.py | jmtorres/octodns | 6188275dbef88b7a41c310364b32851d58532ec9 | [
"MIT"
] | null | null | null | octodns/provider/ns1.py | jmtorres/octodns | 6188275dbef88b7a41c310364b32851d58532ec9 | [
"MIT"
] | 24 | 2021-01-01T07:27:26.000Z | 2021-08-01T05:57:13.000Z | octodns/provider/ns1.py | 0xflotus/octodns | b3aa56f36f551effc28582564320ddd75ba20b11 | [
"MIT"
] | null | null | null | #
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from logging import getLogger
from itertools import chain
from collections import Mapping, OrderedDict, defaultdict
from ns1 import NS1
from ns1.rest.errors import RateLimitException, ResourceException
from pycountry_convert import country_alpha2_to_continent_code
from time import sleep
from uuid import uuid4
from six import text_type
from ..record import Record, Update
from .base import BaseProvider
class Ns1Exception(Exception):
    '''Raised for NS1-specific failure conditions, e.g. an advanced
    record whose filter chain this provider does not recognize.'''
class Ns1Client(object):
    '''Thin wrapper around the NS1 SDK client.

    Centralizes rate-limit aware retries, response pagination and
    caching of monitoring related objects (data source, data feeds and
    monitors) so repeated lookups avoid extra API round trips.
    '''
    log = getLogger('NS1Client')
    def __init__(self, api_key, parallelism=None, retry_count=4,
                 client_config=None):
        '''
        Args:
            api_key: NS1 API key.
            parallelism: Concurrent caller count; enables the SDK's
                "concurrent" rate-limit strategy when set.
            retry_count: Times to retry a call that was rate limited.
            client_config: Extra key/value overrides for the SDK config.
        '''
        self.log.debug('__init__: parallelism=%s, retry_count=%d, '
                       'client_config=%s', parallelism, retry_count,
                       client_config)
        self.retry_count = retry_count
        client = NS1(apiKey=api_key)
        # NS1 rate limits via a "token bucket" scheme, and provides information
        # about rate limiting in headers on responses. Token bucket can be
        # thought of as an initially "full" bucket, where, if not full, tokens
        # are added at some rate. This allows "bursting" requests until the
        # bucket is empty, after which, you are limited to the rate of token
        # replenishment.
        # There are a couple of "strategies" built into the SDK to avoid 429s
        # from rate limiting. Since octodns operates concurrently via
        # `max_workers`, a concurrent strategy seems appropriate.
        # This strategy does nothing until the remaining requests are equal to
        # or less than our `parallelism`, after which, each process will sleep
        # for the token replenishment interval times parallelism.
        # For example, if we can make 10 requests in 60 seconds, a token is
        # replenished every 6 seconds. If parallelism is 3, we will burst 7
        # requests, and subsequently each process will sleep for 18 seconds
        # before making another request.
        # In general, parallelism should match the number of workers.
        if parallelism is not None:
            client.config['rate_limit_strategy'] = 'concurrent'
            client.config['parallelism'] = parallelism
        # The list of records for a zone is paginated at around ~2.5k records,
        # this tells the client to handle any of that transparently and ensure
        # we get the full list of records.
        client.config['follow_pagination'] = True
        # additional options or overrides
        if isinstance(client_config, Mapping):
            for k, v in client_config.items():
                client.config[k] = v
        self._client = client
        # Endpoint handles from the SDK, created once and reused.
        self._records = client.records()
        self._zones = client.zones()
        self._monitors = client.monitors()
        self._notifylists = client.notifylists()
        self._datasource = client.datasource()
        self._datafeed = client.datafeed()
        # Lazily populated caches, see the properties below.
        self._datasource_id = None
        self._feeds_for_monitors = None
        self._monitors_cache = None
    @property
    def datasource_id(self):
        '''Id of the octoDNS data source, created on first use if absent.'''
        if self._datasource_id is None:
            name = 'octoDNS NS1 Data Source'
            source = None
            for candidate in self.datasource_list():
                if candidate['name'] == name:
                    # Found it
                    source = candidate
                    break
            if source is None:
                self.log.info('datasource_id: creating datasource %s', name)
                # We need to create it
                source = self.datasource_create(name=name,
                                                sourcetype='nsone_monitoring')
            self.log.info('datasource_id: id=%s', source['id'])
            self._datasource_id = source['id']
        return self._datasource_id
    @property
    def feeds_for_monitors(self):
        '''Cached map of monitor (job) id -> data feed id.'''
        if self._feeds_for_monitors is None:
            self.log.debug('feeds_for_monitors: fetching & building')
            self._feeds_for_monitors = {
                f['config']['jobid']: f['id']
                for f in self.datafeed_list(self.datasource_id)
            }
        return self._feeds_for_monitors
    @property
    def monitors(self):
        '''Cached map of monitor id -> monitor detail.'''
        if self._monitors_cache is None:
            self.log.debug('monitors: fetching & building')
            self._monitors_cache = \
                {m['id']: m for m in self.monitors_list()}
        return self._monitors_cache
    def datafeed_create(self, sourceid, name, config):
        ret = self._try(self._datafeed.create, sourceid, name, config)
        # Keep the feed cache in sync with the newly created feed.
        self.feeds_for_monitors[config['jobid']] = ret['id']
        return ret
    def datafeed_delete(self, sourceid, feedid):
        ret = self._try(self._datafeed.delete, sourceid, feedid)
        # Rebuild the cache without the deleted feed.
        self._feeds_for_monitors = {
            k: v for k, v in self._feeds_for_monitors.items() if v != feedid
        }
        return ret
    def datafeed_list(self, sourceid):
        return self._try(self._datafeed.list, sourceid)
    def datasource_create(self, **body):
        return self._try(self._datasource.create, **body)
    def datasource_list(self):
        return self._try(self._datasource.list)
    def monitors_create(self, **params):
        body = {}
        ret = self._try(self._monitors.create, body, **params)
        # Cache the newly created monitor.
        self.monitors[ret['id']] = ret
        return ret
    def monitors_delete(self, jobid):
        ret = self._try(self._monitors.delete, jobid)
        # Evict the deleted monitor from the cache.
        self.monitors.pop(jobid)
        return ret
    def monitors_list(self):
        return self._try(self._monitors.list)
    def monitors_update(self, job_id, **params):
        body = {}
        ret = self._try(self._monitors.update, job_id, body, **params)
        # Refresh the cached copy with the updated monitor.
        self.monitors[ret['id']] = ret
        return ret
    def notifylists_delete(self, nlid):
        return self._try(self._notifylists.delete, nlid)
    def notifylists_create(self, **body):
        return self._try(self._notifylists.create, body)
    def notifylists_list(self):
        return self._try(self._notifylists.list)
    def records_create(self, zone, domain, _type, **params):
        return self._try(self._records.create, zone, domain, _type, **params)
    def records_delete(self, zone, domain, _type):
        return self._try(self._records.delete, zone, domain, _type)
    def records_retrieve(self, zone, domain, _type):
        return self._try(self._records.retrieve, zone, domain, _type)
    def records_update(self, zone, domain, _type, **params):
        return self._try(self._records.update, zone, domain, _type, **params)
    def zones_create(self, name):
        return self._try(self._zones.create, name)
    def zones_retrieve(self, name):
        return self._try(self._zones.retrieve, name)
    def _try(self, method, *args, **kwargs):
        '''Invoke an SDK method, sleeping & retrying on 429 rate limits.'''
        tries = self.retry_count
        while True:  # We'll raise to break after our tries expire
            try:
                return method(*args, **kwargs)
            except RateLimitException as e:
                if tries <= 1:
                    raise
                # The response tells us how long until tokens replenish.
                period = float(e.period)
                self.log.warn('rate limit encountered, pausing '
                              'for %ds and trying again, %d remaining',
                              period, tries)
                sleep(period)
                tries -= 1
class Ns1Provider(BaseProvider):
'''
Ns1 provider
ns1:
# Required
class: octodns.provider.ns1.Ns1Provider
api_key: env/NS1_API_KEY
# Only required if using dynamic records
monitor_regions:
- lga
# Optional. Default: None. If set, back off in advance to avoid 429s
# from rate-limiting. Generally this should be set to the number
# of processes or workers hitting the API, e.g. the value of
# `max_workers`.
parallelism: 11
# Optional. Default: 4. Number of times to retry if a 429 response
# is received.
retry_count: 4
# Optional. Default: None. Additional options or overrides passed to
# the NS1 SDK config, as key-value pairs.
client_config:
endpoint: my.nsone.endpoint # Default: api.nsone.net
ignore-ssl-errors: true # Default: false
follow_pagination: false # Default: true
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
ZONE_NOT_FOUND_MESSAGE = 'server error: zone not found'
def _update_filter(self, filter, with_disabled):
if with_disabled:
filter['disabled'] = False
return (dict(sorted(filter.items(), key=lambda t: t[0])))
return filter
def _UP_FILTER(self, with_disabled):
return self._update_filter({
'config': {},
'filter': 'up'
}, with_disabled)
def _REGION_FILTER(self, with_disabled):
return self._update_filter({
'config': {
'remove_no_georegion': True
},
'filter': u'geofence_regional'
}, with_disabled)
def _COUNTRY_FILTER(self, with_disabled):
return self._update_filter({
'config': {
'remove_no_location': True
},
'filter': u'geofence_country'
}, with_disabled)
# In the NS1 UI/portal, this filter is called "SELECT FIRST GROUP" though
# the filter name in the NS1 api is 'select_first_region'
def _SELECT_FIRST_REGION_FILTER(self, with_disabled):
return self._update_filter({
'config': {},
'filter': u'select_first_region'
}, with_disabled)
def _PRIORITY_FILTER(self, with_disabled):
return self._update_filter({
'config': {
'eliminate': u'1'
},
'filter': 'priority'
}, with_disabled)
def _WEIGHTED_SHUFFLE_FILTER(self, with_disabled):
return self._update_filter({
'config': {},
'filter': u'weighted_shuffle'
}, with_disabled)
def _SELECT_FIRST_N_FILTER(self, with_disabled):
return self._update_filter({
'config': {
'N': u'1'
},
'filter': u'select_first_n'
}, with_disabled)
def _BASIC_FILTER_CHAIN(self, with_disabled):
return [
self._UP_FILTER(with_disabled),
self._SELECT_FIRST_REGION_FILTER(with_disabled),
self._PRIORITY_FILTER(with_disabled),
self._WEIGHTED_SHUFFLE_FILTER(with_disabled),
self._SELECT_FIRST_N_FILTER(with_disabled)
]
def _FILTER_CHAIN_WITH_REGION(self, with_disabled):
return [
self._UP_FILTER(with_disabled),
self._REGION_FILTER(with_disabled),
self._SELECT_FIRST_REGION_FILTER(with_disabled),
self._PRIORITY_FILTER(with_disabled),
self._WEIGHTED_SHUFFLE_FILTER(with_disabled),
self._SELECT_FIRST_N_FILTER(with_disabled)
]
def _FILTER_CHAIN_WITH_COUNTRY(self, with_disabled):
return [
self._UP_FILTER(with_disabled),
self._COUNTRY_FILTER(with_disabled),
self._SELECT_FIRST_REGION_FILTER(with_disabled),
self._PRIORITY_FILTER(with_disabled),
self._WEIGHTED_SHUFFLE_FILTER(with_disabled),
self._SELECT_FIRST_N_FILTER(with_disabled)
]
def _FILTER_CHAIN_WITH_REGION_AND_COUNTRY(self, with_disabled):
return [
self._UP_FILTER(with_disabled),
self._REGION_FILTER(with_disabled),
self._COUNTRY_FILTER(with_disabled),
self._SELECT_FIRST_REGION_FILTER(with_disabled),
self._PRIORITY_FILTER(with_disabled),
self._WEIGHTED_SHUFFLE_FILTER(with_disabled),
self._SELECT_FIRST_N_FILTER(with_disabled)
]
_REGION_TO_CONTINENT = {
'AFRICA': 'AF',
'ASIAPAC': 'AS',
'EUROPE': 'EU',
'SOUTH-AMERICA': 'SA',
'US-CENTRAL': 'NA',
'US-EAST': 'NA',
'US-WEST': 'NA',
}
_CONTINENT_TO_REGIONS = {
'AF': ('AFRICA',),
'AS': ('ASIAPAC',),
'EU': ('EUROPE',),
'SA': ('SOUTH-AMERICA',),
# TODO: what about CA, MX, and all the other NA countries?
'NA': ('US-CENTRAL', 'US-EAST', 'US-WEST'),
}
# Necessary for handling unsupported continents in _CONTINENT_TO_REGIONS
_CONTINENT_TO_LIST_OF_COUNTRIES = {
'OC': {'FJ', 'NC', 'PG', 'SB', 'VU', 'AU', 'NF', 'NZ', 'FM', 'GU',
'KI', 'MH', 'MP', 'NR', 'PW', 'AS', 'CK', 'NU', 'PF', 'PN',
'TK', 'TO', 'TV', 'WF', 'WS'},
}
    def __init__(self, id, api_key, retry_count=4, monitor_regions=None,
                 parallelism=None, client_config=None, *args, **kwargs):
        '''
        Args:
            id: Provider id used in config and logging.
            api_key: NS1 API key.
            retry_count: Retries for rate-limited API calls.
            monitor_regions: Regions to run health monitors from
                (needed only when dynamic records are used).
            parallelism: Concurrency hint for the SDK rate limiter.
            client_config: Extra overrides for the NS1 SDK config.
        '''
        self.log = getLogger('Ns1Provider[{}]'.format(id))
        self.log.debug('__init__: id=%s, api_key=***, retry_count=%d, '
                       'monitor_regions=%s, parallelism=%s, client_config=%s',
                       id, retry_count, monitor_regions, parallelism,
                       client_config)
        super(Ns1Provider, self).__init__(id, *args, **kwargs)
        self.monitor_regions = monitor_regions
        self._client = Ns1Client(api_key, parallelism, retry_count,
                                 client_config)
def _valid_filter_config(self, filter_cfg, domain):
with_disabled = self._disabled_flag_in_filters(filter_cfg, domain)
has_region = self._REGION_FILTER(with_disabled) in filter_cfg
has_country = self._COUNTRY_FILTER(with_disabled) in filter_cfg
expected_filter_cfg = self._get_updated_filter_chain(has_region,
has_country,
with_disabled)
return filter_cfg == expected_filter_cfg
def _get_updated_filter_chain(self, has_region, has_country,
with_disabled=True):
if has_region and has_country:
filter_chain = self._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(
with_disabled)
elif has_region:
filter_chain = self._FILTER_CHAIN_WITH_REGION(with_disabled)
elif has_country:
filter_chain = self._FILTER_CHAIN_WITH_COUNTRY(with_disabled)
else:
filter_chain = self._BASIC_FILTER_CHAIN(with_disabled)
return filter_chain
def _encode_notes(self, data):
return ' '.join(['{}:{}'.format(k, v)
for k, v in sorted(data.items())])
def _parse_notes(self, note):
data = {}
if note:
for piece in note.split(' '):
try:
k, v = piece.split(':', 1)
data[k] = v if v != '' else None
except ValueError:
pass
return data
    def _data_for_geo_A(self, _type, record):
        '''Convert a legacy NS1 geo record into octoDNS `geo` record data.

        Answers carrying country/us_state/ca_province/iso_region_code
        meta become geo-keyed value lists; answers without meta become
        the record's default values.
        '''
        # record meta (which would include geo information) is only
        # returned when getting a record's detail, not from zone detail
        geo = defaultdict(list)
        data = {
            'ttl': record['ttl'],
            'type': _type,
        }
        # NOTE(review): `codes` is populated but never used — candidate
        # for removal.
        values, codes = [], []
        for answer in record.get('answers', []):
            meta = answer.get('meta', {})
            if meta:
                # country + state and country + province are allowed
                # in that case though, supplying a state/province would
                # be redundant since the country would supercede in when
                # resolving the record. it is syntactically valid, however.
                country = meta.get('country', [])
                us_state = meta.get('us_state', [])
                ca_province = meta.get('ca_province', [])
                for cntry in country:
                    con = country_alpha2_to_continent_code(cntry)
                    key = '{}-{}'.format(con, cntry)
                    geo[key].extend(answer['answer'])
                for state in us_state:
                    key = 'NA-US-{}'.format(state)
                    geo[key].extend(answer['answer'])
                for province in ca_province:
                    key = 'NA-CA-{}'.format(province)
                    geo[key].extend(answer['answer'])
                for code in meta.get('iso_region_code', []):
                    key = code
                    geo[key].extend(answer['answer'])
            else:
                values.extend(answer['answer'])
                codes.append([])
        values = [text_type(x) for x in values]
        geo = OrderedDict(
            {text_type(k): [text_type(x) for x in v] for k, v in geo.items()}
        )
        data['values'] = values
        data['geo'] = geo
        return data
def _parse_dynamic_pool_name(self, pool_name):
if pool_name.startswith('catchall__'):
# Special case for the old-style catchall prefix
return pool_name[10:]
try:
pool_name, _ = pool_name.rsplit('__', 1)
except ValueError:
pass
return pool_name
    def _data_for_dynamic(self, _type, record):
        '''Reconstruct an octoDNS dynamic record from an NS1 advanced record.

        Pools are rebuilt from answer notes (or legacy region names),
        rules from the NS1 regions, and default values from answers
        annotated with `from:--default--`. Raises Ns1Exception when the
        record's filter chain is not one this provider generates.
        '''
        # First make sure we have the expected filters config
        if not self._valid_filter_config(record['filters'], record['domain']):
            self.log.error('_data_for_dynamic: %s %s has unsupported '
                           'filters', record['domain'], _type)
            raise Ns1Exception('Unrecognized advanced record')
        # All regions (pools) will include the list of default values
        # (eventually) at higher priorities, we'll just add them to this set to
        # we'll have the complete collection.
        default = set()
        # Fill out the pools by walking the answers and looking at their
        # region.
        pools = defaultdict(lambda: {'fallback': None, 'values': []})
        for answer in record['answers']:
            meta = answer['meta']
            notes = self._parse_notes(meta.get('note', ''))
            value = text_type(answer['answer'][0])
            if notes.get('from', False) == '--default--':
                # It's a final/default value, record it and move on
                default.add(value)
                continue
            # NS1 pool names can be found in notes > v0.9.11, in order to allow
            # us to find fallback-only pools/values. Before that we used
            # `region` (group name in the UI) and only paid attention to
            # priority=1 (first level)
            notes_pool_name = notes.get('pool', None)
            if notes_pool_name is None:
                # < v0.9.11
                if meta['priority'] != 1:
                    # Ignore all but priority 1
                    continue
                # And use region's pool name as the pool name
                pool_name = self._parse_dynamic_pool_name(answer['region'])
            else:
                # > v0.9.11, use the notes-based name and consider all values
                pool_name = notes_pool_name
            pool = pools[pool_name]
            value_dict = {
                'value': value,
                'weight': int(meta.get('weight', 1)),
            }
            if value_dict not in pool['values']:
                # If we haven't seen this value before add it to the pool
                pool['values'].append(value_dict)
            # If there's a fallback recorded in the value for its pool go ahead
            # and use it, another v0.9.11 thing
            fallback = notes.get('fallback', None)
            if fallback is not None:
                pool['fallback'] = fallback
        # The regions objects map to rules, but it's a bit fuzzy since they're
        # tied to pools on the NS1 side, e.g. we can only have 1 rule per pool,
        # that may eventually run into problems, but I don't have any use-cases
        # examples currently where it would
        rules = {}
        for pool_name, region in sorted(record['regions'].items()):
            # Get the actual pool name by removing the type
            pool_name = self._parse_dynamic_pool_name(pool_name)
            meta = region['meta']
            notes = self._parse_notes(meta.get('note', ''))
            rule_order = notes['rule-order']
            try:
                rule = rules[rule_order]
            except KeyError:
                rule = {
                    'pool': pool_name,
                    '_order': rule_order,
                }
                rules[rule_order] = rule
            # The group notes field in the UI is a `note` on the region here,
            # that's where we can find our pool's fallback in < v0.9.11 anyway
            if 'fallback' in notes:
                # set the fallback pool name
                pools[pool_name]['fallback'] = notes['fallback']
            geos = set()
            # continents are mapped (imperfectly) to regions, but what about
            # Canada/North America
            for georegion in meta.get('georegion', []):
                geos.add(self._REGION_TO_CONTINENT[georegion])
            # Countries are easy enough to map, we just have to find their
            # continent
            #
            # NOTE: Special handling for Oceania
            # NS1 doesn't support Oceania as a region. So the Oceania countries
            # will be present in meta['country']. If all the countries in the
            # Oceania countries list are found, set the region to OC and remove
            # individual oceania country entries
            oc_countries = set()
            for country in meta.get('country', []):
                # country_alpha2_to_continent_code fails for Pitcairn ('PN')
                if country == 'PN':
                    con = 'OC'
                else:
                    con = country_alpha2_to_continent_code(country)
                if con == 'OC':
                    oc_countries.add(country)
                else:
                    # Adding only non-OC countries here to geos
                    geos.add('{}-{}'.format(con, country))
            if oc_countries:
                if oc_countries == self._CONTINENT_TO_LIST_OF_COUNTRIES['OC']:
                    # All OC countries found, so add 'OC' to geos
                    geos.add('OC')
                else:
                    # Partial OC countries found, just add them as-is to geos
                    for c in oc_countries:
                        geos.add('{}-{}'.format('OC', c))
            # States are easy too, just assume NA-US (CA providences aren't
            # supported by octoDNS currently)
            for state in meta.get('us_state', []):
                geos.add('NA-US-{}'.format(state))
            if geos:
                # There are geos, combine them with any existing geos for this
                # pool and recorded the sorted unique set of them
                rule['geos'] = sorted(set(rule.get('geos', [])) | geos)
        # Order and convert to a list
        default = sorted(default)
        # Convert to list and order
        rules = list(rules.values())
        rules.sort(key=lambda r: (r['_order'], r['pool']))
        data = {
            'dynamic': {
                'pools': pools,
                'rules': rules,
            },
            'ttl': record['ttl'],
            'type': _type,
        }
        if _type == 'CNAME':
            # NOTE(review): assumes at least one default answer exists for
            # CNAME records — an empty default set would raise IndexError.
            data['value'] = default[0]
        else:
            data['values'] = default
        return data
def _data_for_A(self, _type, record):
if record.get('tier', 1) > 1:
# Advanced record, see if it's first answer has a note
try:
first_answer_note = record['answers'][0]['meta']['note']
except (IndexError, KeyError):
first_answer_note = ''
# If that note includes a `from` (pool name) it's a dynamic record
if 'from:' in first_answer_note:
return self._data_for_dynamic(_type, record)
# If not it's an old geo record
return self._data_for_geo_A(_type, record)
# This is a basic record, just convert it
return {
'ttl': record['ttl'],
'type': _type,
'values': [text_type(x) for x in record['short_answers']]
}
_data_for_AAAA = _data_for_A
def _data_for_SPF(self, _type, record):
values = [v.replace(';', '\\;') for v in record['short_answers']]
return {
'ttl': record['ttl'],
'type': _type,
'values': values
}
_data_for_TXT = _data_for_SPF
def _data_for_CAA(self, _type, record):
values = []
for answer in record['short_answers']:
flags, tag, value = answer.split(' ', 2)
values.append({
'flags': flags,
'tag': tag,
'value': value,
})
return {
'ttl': record['ttl'],
'type': _type,
'values': values,
}
def _data_for_CNAME(self, _type, record):
if record.get('tier', 1) > 1:
# Advanced dynamic record
return self._data_for_dynamic(_type, record)
try:
value = record['short_answers'][0]
except IndexError:
value = None
return {
'ttl': record['ttl'],
'type': _type,
'value': value,
}
_data_for_ALIAS = _data_for_CNAME
_data_for_PTR = _data_for_CNAME
def _data_for_MX(self, _type, record):
values = []
for answer in record['short_answers']:
preference, exchange = answer.split(' ', 1)
values.append({
'preference': preference,
'exchange': exchange,
})
return {
'ttl': record['ttl'],
'type': _type,
'values': values,
}
def _data_for_NAPTR(self, _type, record):
values = []
for answer in record['short_answers']:
order, preference, flags, service, regexp, replacement = \
answer.split(' ', 5)
values.append({
'flags': flags,
'order': order,
'preference': preference,
'regexp': regexp,
'replacement': replacement,
'service': service,
})
return {
'ttl': record['ttl'],
'type': _type,
'values': values,
}
def _data_for_NS(self, _type, record):
return {
'ttl': record['ttl'],
'type': _type,
'values': [a if a.endswith('.') else '{}.'.format(a)
for a in record['short_answers']],
}
def _data_for_SRV(self, _type, record):
values = []
for answer in record['short_answers']:
priority, weight, port, target = answer.split(' ', 3)
values.append({
'priority': priority,
'weight': weight,
'port': port,
'target': target,
})
return {
'ttl': record['ttl'],
'type': _type,
'values': values,
}
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s',
zone.name,
target, lenient)
try:
ns1_zone_name = zone.name[:-1]
ns1_zone = self._client.zones_retrieve(ns1_zone_name)
records = []
geo_records = []
# change answers for certain types to always be absolute
for record in ns1_zone['records']:
if record['type'] in ['ALIAS', 'CNAME', 'MX', 'NS', 'PTR',
'SRV']:
for i, a in enumerate(record['short_answers']):
if not a.endswith('.'):
record['short_answers'][i] = '{}.'.format(a)
if record.get('tier', 1) > 1:
# Need to get the full record data for geo records
record = self._client.records_retrieve(ns1_zone_name,
record['domain'],
record['type'])
geo_records.append(record)
else:
records.append(record)
exists = True
except ResourceException as e:
if e.message != self.ZONE_NOT_FOUND_MESSAGE:
raise
records = []
geo_records = []
exists = False
before = len(zone.records)
# geo information isn't returned from the main endpoint, so we need
# to query for all records with geo information
zone_hash = {}
for record in chain(records, geo_records):
_type = record['type']
if _type not in self.SUPPORTS:
continue
data_for = getattr(self, '_data_for_{}'.format(_type))
name = zone.hostname_from_fqdn(record['domain'])
data = data_for(_type, record)
record = Record.new(zone, name, data, source=self, lenient=lenient)
zone_hash[(_type, name)] = record
[zone.add_record(r, lenient=lenient) for r in zone_hash.values()]
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
    def _params_for_geo_A(self, record):
        """Build NS1 API params for a legacy geo A/AAAA record.

        Returns a ``(params, None)`` tuple; geo records do not use
        monitors, hence the None for active monitor ids.
        """
        # purposefully set non-geo answers to have an empty meta,
        # so that we know we did this on purpose if/when troubleshooting
        params = {
            'answers': [{"answer": [x], "meta": {}} for x in record.values],
            'ttl': record.ttl,
        }
        has_country = False
        for iso_region, target in record.geo.items():
            key = 'iso_region_code'
            value = iso_region
            # country/state codes contain a '-' (e.g. "NA-US"); bare
            # continents do not
            if not has_country and len(value.split('-')) > 1:
                has_country = True
            for answer in target.values:
                params['answers'].append(
                    {
                        'answer': [answer],
                        'meta': {key: [value]},
                    },
                )
        params['filters'] = []
        if has_country:
            # shuffle -> geotarget_country -> select_first_n(1) filter chain
            params['filters'].append(
                {"filter": "shuffle", "config": {}}
            )
            params['filters'].append(
                {"filter": "geotarget_country", "config": {}}
            )
            params['filters'].append(
                {"filter": "select_first_n",
                 "config": {"N": 1}}
            )
        return params, None
    def _monitors_for(self, record):
        """Return the NS1 monitors that belong to record, keyed by answer value.

        Only dynamic records have monitors; for anything else an empty
        dict is returned. Ownership is determined by matching the
        host/type encoded in the monitor's notes.
        """
        monitors = {}
        if getattr(record, 'dynamic', False):
            expected_host = record.fqdn[:-1]
            expected_type = record._type
            for monitor in self._client.monitors.values():
                data = self._parse_notes(monitor['notes'])
                if expected_host == data['host'] and \
                   expected_type == data['type']:
                    # This monitor belongs to this record
                    config = monitor['config']
                    value = config['host']
                    if record._type == 'CNAME':
                        # Append a trailing dot for CNAME records so that
                        # lookup by a CNAME answer works
                        value = value + '.'
                    monitors[value] = monitor
        return monitors
def _uuid(self):
return uuid4().hex
    def _feed_create(self, monitor):
        """Create an NS1 datafeed for monitor and return its feed id."""
        monitor_id = monitor['id']
        self.log.debug('_feed_create: monitor=%s', monitor_id)
        # TODO: looks like length limit is 64 char
        # suffix with a short random token so feed names stay unique
        name = '{} - {}'.format(monitor['name'], self._uuid()[:6])
        # Create the data feed
        config = {
            'jobid': monitor_id,
        }
        feed = self._client.datafeed_create(self._client.datasource_id, name,
                                            config)
        feed_id = feed['id']
        self.log.debug('_feed_create: feed=%s', feed_id)
        return feed_id
    def _monitor_create(self, monitor):
        """Create a monitor plus its notify list and datafeed.

        :param monitor: monitor config dict (as built by _monitor_gen)
        :return: (monitor_id, feed_id) tuple
        """
        self.log.debug('_monitor_create: monitor="%s"', monitor['name'])
        # Create the notify list
        notify_list = [{
            'config': {
                'sourceid': self._client.datasource_id,
            },
            'type': 'datafeed',
        }]
        nl = self._client.notifylists_create(name=monitor['name'],
                                             notify_list=notify_list)
        nl_id = nl['id']
        self.log.debug('_monitor_create: notify_list=%s', nl_id)
        # Create the monitor
        monitor['notify_list'] = nl_id
        monitor = self._client.monitors_create(**monitor)
        monitor_id = monitor['id']
        self.log.debug('_monitor_create: monitor=%s', monitor_id)
        return monitor_id, self._feed_create(monitor)
    def _monitor_gen(self, record, value):
        """Build the expected NS1 monitor config for a record's answer value.

        Always a TCP job; for HTTP/HTTPS health checks a request string
        and a "200 OK" output rule are added as well.
        """
        host = record.fqdn[:-1]
        _type = record._type
        if _type == 'CNAME':
            # NS1 does not accept a host value with a trailing dot
            value = value[:-1]
        ret = {
            'active': True,
            'config': {
                'connect_timeout': 2000,
                'host': value,
                'port': record.healthcheck_port,
                'response_timeout': 10000,
                'ssl': record.healthcheck_protocol == 'HTTPS',
            },
            'frequency': 60,
            'job_type': 'tcp',
            'name': '{} - {} - {}'.format(host, _type, value),
            # notes carry the owning record's host/type so _monitors_for
            # can find this monitor again later
            'notes': self._encode_notes({
                'host': host,
                'type': _type,
            }),
            'policy': 'quorum',
            'rapid_recheck': False,
            'region_scope': 'fixed',
            'regions': self.monitor_regions,
        }
        if record.healthcheck_protocol != 'TCP':
            # IF it's HTTP we need to send the request string
            path = record.healthcheck_path
            host = record.healthcheck_host
            request = r'GET {path} HTTP/1.0\r\nHost: {host}\r\n' \
                r'User-agent: NS1\r\n\r\n'.format(path=path, host=host)
            ret['config']['send'] = request
            # We'll also expect a HTTP response
            ret['rules'] = [{
                'comparison': 'contains',
                'key': 'output',
                'value': '200 OK',
            }]
        return ret
def _monitor_is_match(self, expected, have):
# Make sure what we have matches what's in expected exactly. Anything
# else in have will be ignored.
for k, v in expected.items():
if have.get(k, '--missing--') != v:
return False
return True
def _monitor_sync(self, record, value, existing):
self.log.debug('_monitor_sync: record=%s, value=%s', record.fqdn,
value)
expected = self._monitor_gen(record, value)
if existing:
self.log.debug('_monitor_sync: existing=%s', existing['id'])
monitor_id = existing['id']
if not self._monitor_is_match(expected, existing):
self.log.debug('_monitor_sync: existing needs update')
# Update the monitor to match expected, everything else will be
# left alone and assumed correct
self._client.monitors_update(monitor_id, **expected)
feed_id = self._client.feeds_for_monitors.get(monitor_id)
if feed_id is None:
self.log.warn('_monitor_sync: %s (%s) missing feed, creating',
existing['name'], monitor_id)
feed_id = self._feed_create(existing)
else:
self.log.debug('_monitor_sync: needs create')
# We don't have an existing monitor create it (and related bits)
monitor_id, feed_id = self._monitor_create(expected)
return monitor_id, feed_id
    def _monitors_gc(self, record, active_monitor_ids=None):
        """Delete record's monitors that are not in active_monitor_ids.

        Each deleted monitor's datafeed and notify list are removed too.
        With no active set supplied, all of the record's monitors go.
        """
        self.log.debug('_monitors_gc: record=%s, active_monitor_ids=%s',
                       record.fqdn, active_monitor_ids)
        if active_monitor_ids is None:
            active_monitor_ids = set()
        for monitor in self._monitors_for(record).values():
            monitor_id = monitor['id']
            if monitor_id in active_monitor_ids:
                continue
            self.log.debug('_monitors_gc: deleting %s', monitor_id)
            feed_id = self._client.feeds_for_monitors.get(monitor_id)
            if feed_id:
                self._client.datafeed_delete(self._client.datasource_id,
                                             feed_id)
            self._client.monitors_delete(monitor_id)
            notify_list_id = monitor['notify_list']
            self._client.notifylists_delete(notify_list_id)
    def _add_answers_for_pool(self, answers, default_answers, pool_name,
                              pool_label, pool_answers, pools, priority):
        """Append NS1 answers for pool_name's fallback chain to `answers`.

        Walks the pool and its fallbacks, bumping the priority at each
        hop; `seen` guards against fallback cycles. Finally the static
        default answers are appended at the last priority with up=True.
        Mutates `answers` in place.
        """
        current_pool_name = pool_name
        seen = set()
        while current_pool_name and current_pool_name not in seen:
            seen.add(current_pool_name)
            pool = pools[current_pool_name]
            for answer in pool_answers[current_pool_name]:
                fallback = pool.data['fallback']
                answer = {
                    'answer': answer['answer'],
                    'meta': {
                        'priority': priority,
                        'note': self._encode_notes({
                            'from': pool_label,
                            'pool': current_pool_name,
                            'fallback': fallback or '',
                        }),
                        'up': {
                            'feed': answer['feed_id'],
                        },
                        'weight': answer['weight'],
                    },
                    'region': pool_label,  # the one we're answering
                }
                answers.append(answer)
            current_pool_name = pool.data.get('fallback', None)
            priority += 1
        # Static/default
        for answer in default_answers:
            answer = {
                'answer': answer['answer'],
                'meta': {
                    'priority': priority,
                    'note': self._encode_notes({
                        'from': '--default--',
                    }),
                    'up': True,
                    'weight': 1,
                },
                'region': pool_label,  # the one we're answering
            }
            answers.append(answer)
    def _params_for_dynamic(self, record):
        """Build NS1 API params for an advanced dynamic record.

        Converts octoDNS dynamic rules into NS1 regions, answers and a
        filter chain, syncing one monitor per pool value along the way.

        :return: (params, active_monitor_ids) tuple
        """
        pools = record.dynamic.pools
        # Convert rules to regions
        has_country = False
        has_region = False
        regions = {}
        for i, rule in enumerate(record.dynamic.rules):
            pool_name = rule.data['pool']
            notes = {
                'rule-order': i,
            }
            fallback = pools[pool_name].data.get('fallback', None)
            if fallback:
                notes['fallback'] = fallback
            country = set()
            georegion = set()
            us_state = set()
            # geo targeting strings are length-coded: 8 = state, 5 =
            # country, otherwise continent
            for geo in rule.data.get('geos', []):
                n = len(geo)
                if n == 8:
                    # US state, e.g. NA-US-KY
                    us_state.add(geo[-2:])
                    # For filtering. State filtering is done by the country
                    # filter
                    has_country = True
                elif n == 5:
                    # Country, e.g. EU-FR
                    country.add(geo[-2:])
                    has_country = True
                else:
                    # Continent, e.g. AS
                    if geo in self._CONTINENT_TO_REGIONS:
                        georegion.update(self._CONTINENT_TO_REGIONS[geo])
                        has_region = True
                    else:
                        # No maps for geo in _CONTINENT_TO_REGIONS.
                        # Use the country list
                        self.log.debug('Converting geo {} to country list'.
                                       format(geo))
                        for c in self._CONTINENT_TO_LIST_OF_COUNTRIES[geo]:
                            country.add(c)
                            has_country = True
            meta = {
                'note': self._encode_notes(notes),
            }
            if georegion:
                georegion_meta = dict(meta)
                georegion_meta['georegion'] = sorted(georegion)
                regions['{}__georegion'.format(pool_name)] = {
                    'meta': georegion_meta,
                }
            if country or us_state:
                # If there's country and/or states its a country pool,
                # countries and states can coexist as they're handled by the
                # same step in the filterchain (countries and georegions
                # cannot as they're seperate stages and run the risk of
                # eliminating all options)
                country_state_meta = dict(meta)
                if country:
                    country_state_meta['country'] = sorted(country)
                if us_state:
                    country_state_meta['us_state'] = sorted(us_state)
                regions['{}__country'.format(pool_name)] = {
                    'meta': country_state_meta,
                }
            if not georegion and not country and not us_state:
                # If there's no targeting it's a catchall
                regions['{}__catchall'.format(pool_name)] = {
                    'meta': meta,
                }
        existing_monitors = self._monitors_for(record)
        active_monitors = set()
        # Build a list of primary values for each pool, including their
        # feed_id (monitor)
        pool_answers = defaultdict(list)
        for pool_name, pool in sorted(pools.items()):
            for value in pool.data['values']:
                weight = value['weight']
                value = value['value']
                existing = existing_monitors.get(value)
                monitor_id, feed_id = self._monitor_sync(record, value,
                                                         existing)
                active_monitors.add(monitor_id)
                pool_answers[pool_name].append({
                    'answer': [value],
                    'weight': weight,
                    'feed_id': feed_id,
                })
        if record._type == 'CNAME':
            default_values = [record.value]
        else:
            default_values = record.values
        default_answers = [{
            'answer': [v],
            'weight': 1,
        } for v in default_values]
        # Build our list of answers
        # The regions dictionary built above already has the required pool
        # names. Iterate over them and add answers.
        answers = []
        for pool_name in sorted(regions.keys()):
            priority = 1
            # Dynamic/health checked
            pool_label = pool_name
            # Remove the pool type from the end of the name
            pool_name = self._parse_dynamic_pool_name(pool_name)
            self._add_answers_for_pool(answers, default_answers, pool_name,
                                       pool_label, pool_answers, pools,
                                       priority)
        # Update filters as necessary
        filters = self._get_updated_filter_chain(has_region, has_country)
        return {
            'answers': answers,
            'filters': filters,
            'regions': regions,
            'ttl': record.ttl,
        }, active_monitors
def _params_for_A(self, record):
if getattr(record, 'dynamic', False):
return self._params_for_dynamic(record)
elif hasattr(record, 'geo'):
return self._params_for_geo_A(record)
return {
'answers': record.values,
'ttl': record.ttl,
}, None
_params_for_AAAA = _params_for_A
_params_for_NS = _params_for_A
def _params_for_SPF(self, record):
# NS1 seems to be the only provider that doesn't want things
# escaped in values so we have to strip them here and add
# them when going the other way
values = [v.replace('\\;', ';') for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
_params_for_TXT = _params_for_SPF
def _params_for_CAA(self, record):
values = [(v.flags, v.tag, v.value) for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
def _params_for_CNAME(self, record):
if getattr(record, 'dynamic', False):
return self._params_for_dynamic(record)
return {'answers': [record.value], 'ttl': record.ttl}, None
_params_for_ALIAS = _params_for_CNAME
_params_for_PTR = _params_for_CNAME
def _params_for_MX(self, record):
values = [(v.preference, v.exchange) for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
def _params_for_NAPTR(self, record):
values = [(v.order, v.preference, v.flags, v.service, v.regexp,
v.replacement) for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
def _params_for_SRV(self, record):
values = [(v.priority, v.weight, v.port, v.target)
for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
    def _get_ns1_filters(self, ns1_zone_name):
        """Return {fqdn: filter chain} for the zone's advanced (tier > 1) records.

        A missing zone is treated as having no filters; any other API
        error is re-raised.
        """
        ns1_filters = {}
        ns1_zone = {}
        try:
            ns1_zone = self._client.zones_retrieve(ns1_zone_name)
        except ResourceException as e:
            if e.message != self.ZONE_NOT_FOUND_MESSAGE:
                raise
        if 'records' in ns1_zone:
            for ns1_record in ns1_zone['records']:
                if ns1_record.get('tier', 1) > 1:
                    # Need to get the full record data for geo records
                    full_rec = self._client.records_retrieve(
                        ns1_zone_name,
                        ns1_record['domain'],
                        ns1_record['type'])
                    if 'filters' in full_rec:
                        # key on the fqdn (with trailing dot)
                        filter_key = '{}.'.format(ns1_record['domain'])
                        ns1_filters[filter_key] = full_rec['filters']
        return ns1_filters
def _disabled_flag_in_filters(self, filters, domain):
disabled_count = ['disabled' in f for f in filters].count(True)
if disabled_count and disabled_count != len(filters):
# Some filters have the disabled flag, and some don't. Disallow
exception_msg = 'Mixed disabled flag in filters for {}'.format(
domain)
raise Ns1Exception(exception_msg)
return disabled_count == len(filters)
    def _extra_changes(self, desired, changes, **kwargs):
        """Flag dynamic records whose NS1-side filters or monitors drifted.

        Returns a list of extra Update changes for records that are
        otherwise unchanged but need their filter chain normalized or a
        monitor repaired.
        """
        self.log.debug('_extra_changes: desired=%s', desired.name)
        ns1_filters = self._get_ns1_filters(desired.name[:-1])
        changed = set([c.record for c in changes])
        extra = []
        for record in desired.records:
            if record in changed or not getattr(record, 'dynamic', False):
                # Already changed, or no dynamic , no need to check it
                continue
            # Filter normalization
            # Check if filters for existing domains need an update
            # Needs an explicit check since there might be no change in the
            # config at all. Filters however might still need an update
            domain = '{}.{}'.format(record.name, record.zone.name)
            if domain in ns1_filters:
                domain_filters = ns1_filters[domain]
                if not self._disabled_flag_in_filters(domain_filters, domain):
                    # 'disabled' entry absent in filter config. Need to update
                    # filters. Update record
                    self.log.info('_extra_changes: change in filters for %s',
                                  domain)
                    extra.append(Update(record, record))
                    continue
            for value, have in self._monitors_for(record).items():
                expected = self._monitor_gen(record, value)
                # TODO: find values which have missing monitors
                if not self._monitor_is_match(expected, have):
                    self.log.info('_extra_changes: monitor mis-match for %s',
                                  expected['name'])
                    extra.append(Update(record, record))
                    break
                if not have.get('notify_list'):
                    self.log.info('_extra_changes: broken monitor no notify '
                                  'list %s (%s)', have['name'], have['id'])
                    extra.append(Update(record, record))
                    break
        return extra
def _apply_Create(self, ns1_zone, change):
new = change.new
zone = new.zone.name[:-1]
domain = new.fqdn[:-1]
_type = new._type
params, active_monitor_ids = \
getattr(self, '_params_for_{}'.format(_type))(new)
self._client.records_create(zone, domain, _type, **params)
self._monitors_gc(new, active_monitor_ids)
def _apply_Update(self, ns1_zone, change):
new = change.new
zone = new.zone.name[:-1]
domain = new.fqdn[:-1]
_type = new._type
params, active_monitor_ids = \
getattr(self, '_params_for_{}'.format(_type))(new)
self._client.records_update(zone, domain, _type, **params)
self._monitors_gc(new, active_monitor_ids)
def _apply_Delete(self, ns1_zone, change):
existing = change.existing
zone = existing.zone.name[:-1]
domain = existing.fqdn[:-1]
_type = existing._type
self._client.records_delete(zone, domain, _type)
self._monitors_gc(existing)
def _has_dynamic(self, changes):
for change in changes:
if getattr(change.record, 'dynamic', False):
return True
return False
    def _apply(self, plan):
        """Apply the plan's changes to NS1, creating the zone if missing.

        Aborts up-front with Ns1Exception when dynamic records would be
        touched without monitor_regions configured, to avoid a partial
        apply.
        """
        desired = plan.desired
        changes = plan.changes
        self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
                       len(changes))
        # Make sure that if we're going to make any dynamic changes that we
        # have monitor_regions configured before touching anything so we can
        # abort early and not half-apply
        if self._has_dynamic(changes) and self.monitor_regions is None:
            raise Ns1Exception('Monitored record, but monitor_regions not set')
        domain_name = desired.name[:-1]
        try:
            ns1_zone = self._client.zones_retrieve(domain_name)
        except ResourceException as e:
            if e.message != self.ZONE_NOT_FOUND_MESSAGE:
                raise
            self.log.debug('_apply: no matching zone, creating')
            ns1_zone = self._client.zones_create(domain_name)
        for change in changes:
            # dispatch to _apply_Create/_apply_Update/_apply_Delete
            class_name = change.__class__.__name__
            getattr(self, '_apply_{}'.format(class_name))(ns1_zone,
                                                          change)
| 37.944526 | 79 | 0.540032 |
b166b431cd82024908ab2b71afc357f20fbca001 | 36,868 | py | Python | mapper/mapping.py | unibas-dmi-hpc/MapLib | 4bc38c50c580ddd39785291726f3174665f0a06a | [
"MIT"
] | null | null | null | mapper/mapping.py | unibas-dmi-hpc/MapLib | 4bc38c50c580ddd39785291726f3174665f0a06a | [
"MIT"
] | null | null | null | mapper/mapping.py | unibas-dmi-hpc/MapLib | 4bc38c50c580ddd39785291726f3174665f0a06a | [
"MIT"
] | 1 | 2020-06-12T14:38:48.000Z | 2020-06-12T14:38:48.000Z | from collections import deque
from heapq import heappop, heappush
from itertools import product
from random import sample, randint, random, uniform
import copy
import nxmetis
import networkx as nx
import sys
import math
from .vector import Vector
from .routing import manhattan_distance
import cProfile
def profile(func):
    """Decorator that runs *func* under cProfile and prints the stats.

    The wrapped function's return value is passed through unchanged; the
    profile report is printed to stdout even when the call raises.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def profiled_func(*args, **kwargs):
        # renamed from `profile` to avoid shadowing this decorator
        profiler = cProfile.Profile()
        try:
            profiler.enable()
            result = func(*args, **kwargs)
            profiler.disable()
            return result
        finally:
            profiler.print_stats()
    return profiled_func
class Mapping():
    """Base class for all mapping strategies.

    Subclasses implement ``map()``, which returns a dict mapping each
    process id to an (x, y, z) coordinate of the target topology; it is
    invoked once during construction and stored as ``self.mapping``.
    """
    @profile
    def __init__(self, process_graph, topology, **kwargs):
        # kwargs are attached verbatim as instance attributes
        # (strategy-specific options, e.g. _name)
        self.routed = False
        self.process_graph = process_graph
        self._topology = topology
        for key, value in kwargs.items():
            setattr(self, key, value)
        self.mapping = self.map()
    @property
    def rev_mapping(self, mapping=None):
        """
        Returns the reversed mapping: coordinate -> list of process ids.
        NOTE(review): as a property this is always accessed without
        arguments, so ``mapping`` is effectively always None and
        ``self.mapping`` is used — confirm the parameter is intentional.
        :param mapping: Mapping
        :return: Reversed mapping
        """
        items = mapping.items() if mapping else self.mapping.items()
        reverse = {}
        for proc, cord in items:
            try:
                reverse[cord].append(proc)
            except KeyError:
                reverse[cord] = [proc]
        return reverse
    @property
    def name(self):
        """
        Returns the name of the mapping strategy.
        NOTE(review): relies on ``_name`` being supplied via kwargs at
        construction time — confirm callers always pass it.
        :return: Mapping name
        """
        return self._name
    @property
    def topology(self):
        """
        Returns the topology for mapping.
        :return: Topology
        """
        return self._topology
    def to_file(self, f):
        """
        Writes mapping to file
        :param f: writable file-like object
        """
        f.write(self.__str__())
    def cardinality(self, mapping):
        """
        Returns the cardinality of a mapping: the number of process-graph
        edges whose endpoints land on neighboring topology nodes.
        :param mapping: Mapping
        :return: Cardinality
        """
        _sum = 0
        for u, v in self.process_graph.edges():
            if self.topology.are_neighbors(mapping[u], mapping[v]):
                _sum += 1
        return _sum
    def __str__(self):
        """
        Returns the string representation of a mapping: a header line
        with the strategy name, a column header, then one line per
        coordinate listing its process ids.
        :return: Output mapping representation
        """
        s = self.__class__.__name__.lower() + '\n'
        s += 'x_coord y_coord z_coord number_of_processes process_id(s)\n'
        for (x, y, z), proc_list in sorted(self.rev_mapping.items()):
            s += '%d %d %d %d ' % (x, y, z, len(proc_list))
            s += ' '.join([str(i) for i in proc_list]) + '\n'
        return s
class UDFS(Mapping):
    """Utilization-based depth-first search (UDFS) mapping.

    Variation on plain UDFS: instead of simply taking the next processor,
    the free processor closest (in hop distance) to the one hosting the
    already-mapped communication partner is chosen.
    """
    def map(self):
        # recursive DFS over the task graph can get deep for large inputs
        sys.setrecursionlimit(10000)
        task_graph = self.process_graph
        # build the processor graph from the topology's neighbour relation
        processor_graph = nx.Graph()
        for elem in range(len(self.topology)):
            x,y,z = self.topology.to_xyz(elem)
            for neighbour in self.topology.neighbours(x,y,z):
                processor_graph.add_edge(elem,self.topology.to_idx(neighbour))
        mapping = {}
        def UDFS_algorithm():
            #queue_of_heaviest_task = [i[0] for i in sorted(task_graph.degree(None,'weight').items(), key=lambda item:item[1], reverse=True)]
            # tasks ordered by total weighted communication, heaviest first
            queue_of_heaviest_task = [i[0] for i in sorted(task_graph.degree(None,'weight'), key=lambda item:item[1], reverse=True)]
            processor_set = {i for i in range(len(processor_graph))}
            # seed: heaviest task on a processor near the middle of the index range
            start_node = len(processor_set)//2
            start_task = queue_of_heaviest_task.pop(0)
            mapping[start_task] = self.topology.to_xyz(start_node)
            processor_set.remove(start_node)
            set_mapped = set()
            set_mapped.add(start_task)
            def map_next(i,p):
                # map all not-yet-mapped neighbours of task i, heaviest edge
                # first, recursing depth-first into each newly mapped task
                while not set(task_graph.neighbors(i)).issubset(set_mapped):
                    queue = []
                    for neighbour in task_graph.neighbors(i):
                        if neighbour not in set_mapped:
                            queue.append((neighbour,task_graph.get_edge_data(i,neighbour)['weight']))
                    queue = [elem[0] for elem in sorted(queue,key=lambda item:item[1],reverse=True)]
                    # free processors sorted by hop distance from task i's node
                    near_node = [edge[0] for edge in sorted(nx.single_source_shortest_path_length(processor_graph,self.topology.to_idx(mapping[i])).items(),key=lambda item:item[1]) if edge[1]!=0]
                    for node in near_node:
                        if node in processor_set:
                            break
                    k = queue.pop(0)
                    mapping[k] = self.topology.to_xyz(node)
                    processor_set.remove(node)
                    set_mapped.add(k)
                    p = map_next(k,node)
                return p
            map_next(start_task,start_node)
        UDFS_algorithm()
        return mapping
class Greedy_All_C(Mapping):
    """GreedyAllC mapping (Glantz & Meyerhenke).

    Greedily pairs the communication-heaviest unplaced task (or METIS
    block, when ``flag`` is True) with the processor that minimizes the
    weighted distance to already-placed communication partners.
    """
    def map(self):
        task_graph = self.process_graph
        # build the processor graph from the topology's neighbour relation
        processor_graph = nx.Graph()
        for elem in range(len(self.topology)):
            x,y,z = self.topology.to_xyz(elem)
            for neighbour in self.topology.neighbours(x,y,z):
                processor_graph.add_edge(elem,self.topology.to_idx(neighbour))
        mapping = {}
        flag = False #false if without partition, true - otherwise
        def create_partition_graph(tgraph,pgraph):
            # METIS-partition the task graph into len(pgraph) blocks and
            # build the quotient graph whose edge weights aggregate the
            # inter-block communication volume
            partition = nxmetis.partition(tgraph,len(pgraph),None,None,'weight',None,None,None,True)
            partition_graph = nx.Graph()
            partition_graph.add_nodes_from(pgraph.nodes())
            for i in range(len(partition[1])):
                for j in range(len(partition[1])):
                    if j != i:
                        val = 0
                        for elem_from_i in partition[1][i]:
                            for elem_from_j in partition[1][j]:
                                if tgraph.has_edge(elem_from_i,elem_from_j):
                                    val += tgraph.get_edge_data(elem_from_i,elem_from_j)['weight']
                        if val > 0:
                            partition_graph.add_edge(i,j,weight=val)
            return partition,partition_graph
        def compute_tP():
            # all-pairs distance table t (neighbours = 1, otherwise
            # Manhattan distance) and per-node row sums tP_min
            t = {}
            tP_min = {}
            for u_p in processor_graph.nodes():
                t[u_p] = []
                u_p_xyz = self.topology.to_xyz(u_p)
                for v_p in processor_graph.nodes():
                    v_p_xyz = self.topology.to_xyz(v_p)
                    if u_p == v_p:
                        distance = 0
                    elif self.topology.are_neighbors(u_p_xyz,v_p_xyz):
                        distance = 1
                    else:
                        distance = manhattan_distance(u_p_xyz,v_p_xyz)
                    t[u_p].append(distance)
                tP_min[u_p] = sum(t[u_p])
            return tP_min, t
        if flag:
            partition, partition_graph = create_partition_graph(task_graph,processor_graph)
        else:
            partition_graph = task_graph
        return_mapping = {}
        sum_min, tP = compute_tP()
        #v_0_c = max(partition_graph.degree(None,'weight').items(),key=lambda item:item[1])[0]
        # seeds: heaviest task/block and most central processor
        v_0_c = max(partition_graph.degree(None,'weight'),key=lambda item:item[1])[0]
        v_0_p = min(sum_min.items(),key=lambda item:item[1])[0]
        sum_c = [0 for i in range(len(partition_graph))]
        sum_p = [1 for i in range(len(partition_graph))]
        for i in range(len(partition_graph)):
            # sentinel values mark placed tasks (-1) / used processors (maxsize)
            sum_c[v_0_c] = -1
            sum_p[v_0_p] = sys.maxsize
            mapping[v_0_c] = self.topology.to_xyz(v_0_p)
            for w in partition_graph[v_0_c]:
                if sum_c[w] >= 0:
                    sum_c[w] = sum_c[w] + partition_graph.get_edge_data(v_0_c,w)['weight']
            v_0_c = sum_c.index(max(sum_c))
            for j in range(len(processor_graph)):
                if sum_p[j] < sys.maxsize:
                    sum_p[j] = 0
                    for w in partition_graph[v_0_c]:
                        if sum_c[w] < 0:
                            sum_p[j] = sum_p[j] + partition_graph.get_edge_data(v_0_c,w)['weight'] * tP[j][self.topology.to_idx(mapping[w])]
            v_0_p = sum_p.index(min(sum_p))
        if flag:
            # expand block-level mapping back to individual task ids
            for i in range(len(mapping)):
                for elem in partition[1][i]:
                    return_mapping[elem] = mapping[i]
            return return_mapping
        else:
            return mapping
class Greedy_Graph_Embedding(Mapping):
    """Greedy graph embedding mapping.

    Repeatedly places the task incident to the heaviest pending edge
    onto the free processor nearest (Dijkstra distance) to the previous
    placement; can optionally operate on a METIS partition (``flag``).
    """
    def map(self):
        task_graph = self.process_graph
        processor_graph = nx.Graph() #create a topology graph using info about each node`s neighbours
        for elem in range(len(self.topology)):
            x,y,z = self.topology.to_xyz(elem)
            for neighbour in self.topology.neighbours(x,y,z):
                processor_graph.add_edge(elem,self.topology.to_idx(neighbour)) #if two nodes are neighbours, we add an edge between them to our topo_graph
        # all-pairs shortest-path lengths used for "closest free node" lookups
        dijkstra_path = dict(nx.all_pairs_dijkstra_path_length(processor_graph))
        flag = False #false if without partition, true - with
        def find_new_start(C,s):#we are looking for the nearest node to s that is free for allocating, i.e. has C(s)>0
            # linear scan outwards in index space from s
            left,right = 0,0
            while True:
                if C[s-left]==1:
                    return s-left
                elif C[s+right]==1:
                    return s+right
                if s-left>0:
                    left += 1
                if s+right<len(C)-1:
                    right += 1
        def create_partition_graph(tgraph,pgraph):#we create a partition of a given app_graph to len(graph)-chunks,
            #several tasks that communicate tightly are combined into one block, an edge between each block has weight equal to sum of all edges between all tasks in two different blocks
            #
            partition = nxmetis.partition(tgraph,len(pgraph),None,None,'weight',None,None,None,True)
            partition_graph = nx.Graph()
            partition_graph.add_nodes_from(pgraph.nodes())
            for i in range(len(partition[1])):
                for j in range(len(partition[1])):
                    if j != i:
                        val = 0
                        for elem_from_i in partition[1][i]:
                            for elem_from_j in partition[1][j]:
                                if tgraph.has_edge(elem_from_i,elem_from_j):
                                    val += tgraph.get_edge_data(elem_from_i,elem_from_j)['weight']
                        if val>0:
                            partition_graph.add_edge(i,j,weight=val)
            return partition, partition_graph
        def dijkstra_closest_vertex(C,start): #here we are looking for the nearest node to our start position
            #that has C[s]>0
            min_distance = float("inf")
            for elem in dijkstra_path[start]:
                if elem != start and C[elem]>0:
                    if dijkstra_path[start][elem] < min_distance:
                        min_distance,min_index = dijkstra_path[start][elem],elem
            return min_index
        def greedy_graph_embedding_algo(tgraph,pgraph):
            mapping = {}
            task_set = set(tgraph.nodes())
            # C[p] == 1 while processor p is still free
            C = [1 for i in range(len(pgraph))]
            queue = []
            start_node = list(pgraph.nodes())[len(pgraph.nodes())//2] #one from the center ID`s
            #here we sort all vertices according to the weights of their out edges
            queue_of_heaviest_in_S = [i[0] for i in sorted(tgraph.degree(None,'weight'),key=lambda item:item[1],reverse=True)]
            #queue_of_heaviest_in_S = [i[0] for i in sorted(tgraph.degree(None,'weight'),key=lambda item:item[1],reverse=True)]
            while len(task_set)!=0:
                vertex_m = queue_of_heaviest_in_S.pop(0)
                if C[start_node] == 0:
                    start_node = find_new_start(C,start_node)
                mapping[vertex_m] = self.topology.to_xyz(start_node)
                task_set.remove(vertex_m)
                C[start_node] = 0
                for u in tgraph[vertex_m]:
                    if u in task_set:
                        queue.append((vertex_m,u,tgraph.get_edge_data(vertex_m,u)))
                #here we sort the queue according to the weights of each edge
                queue = sorted(queue,key=lambda x:x[2]['weight'],reverse=True)
                while len(queue)!=0:
                    heaviest_edge_in_Q = queue.pop(0)
                    if C[start_node] == 0:
                        start_node = dijkstra_closest_vertex(C,start_node)
                    _,vertex_m = heaviest_edge_in_Q[0],heaviest_edge_in_Q[1]
                    mapping[vertex_m] = self.topology.to_xyz(start_node)
                    task_set.remove(vertex_m)
                    queue_of_heaviest_in_S.remove(vertex_m)
                    C[start_node] = 0
                    #here we have to check whether our u in task_set or not
                    #if not, there can be a situation, when an edge with its neighbour is already added into queue,
                    #hence we have to delete it from the queue in order not consider it in the future (because we already have allocated these two nodes onto topology nodes)
                    for u in tgraph[vertex_m]:
                        if u in task_set:
                            queue.append((vertex_m,u,tgraph.get_edge_data(vertex_m,u)))
                        elif (vertex_m,u,tgraph.get_edge_data(vertex_m,u)) in queue or (u,vertex_m,tgraph.get_edge_data(vertex_m,u)) in queue:
                            try:
                                queue.remove((vertex_m,u,tgraph.get_edge_data(vertex_m,u)))
                            except ValueError:
                                queue.remove((u,vertex_m,tgraph.get_edge_data(vertex_m,u)))
                    queue = sorted(queue,key=lambda x:x[2]['weight'],reverse=True)
            return mapping
        if flag:
            partition, partition_graph = create_partition_graph(task_graph,processor_graph)
            maps = greedy_graph_embedding_algo(partition_graph,processor_graph)
            return_mapping = {}
            for i in range(len(maps)):
                for elem in partition[1][i]:
                    return_mapping[elem]=maps[i]
            return return_mapping
        else:
            maps = greedy_graph_embedding_algo(task_graph,processor_graph)
            return maps
        #returned mapping (maps) will consist of tuples: id of block and coordinates of topology nodes
        #however each block can consist of several tasks ids, respectively
        #so, now we create a new dictionary: id of task, coordinates of topo node
class Recursive_bipartitioning(Mapping):
"""Class for the recursive bipartitioning mapping"""
def map(self):
mapping={}
task_graph = self.process_graph
processors_set = {i for i in range(len(self.topology))}
processor_graph = nx.Graph()
for elem in range(len(self.topology)):
x,y,z = self.topology.to_xyz(elem)
for neighbour in self.topology.neighbours(x,y,z):
processor_graph.add_edge(elem,self.topology.to_idx(neighbour))
len_graph_part = 0
def node_partition(set_of_nodes):#if our topo is represented as a set
if isinstance(set_of_nodes,set):
set_of_nodes = list(set_of_nodes)
global len_graph_part
half = len_graph_part
return set_of_nodes[:half], set_of_nodes[half:]
def graph_node_partition(topology_graph):#proper case, if our topo is a graph
partition = nxmetis.partition(topology_graph,2,None,None,None,None,None,None,True)
# LLP: checking if the partitions have the same size
# if that is not the case, we move items from the biggest to the smallest until equilibrium is met
if(len(partition[1][0]) != len(partition[1][1])):
left_is_smallest = len(partition[1][0]) < len(partition[1][1]) # Gets who is the smallest one
small, big = ((partition[1][1], partition[1][0]), (partition[1][0], partition[1][1]))[left_is_smallest] # calls the smallest as "small"
while len(small) < len(big):
small.append(big.pop(0))
left_graph = nx.Graph()
left_graph.add_nodes_from(partition[1][0])
for u in partition[1][0]:
for v in partition[1][0]:
if processor_graph.has_edge(u,v):
left_graph.add_edge(u,v)
right_graph = nx.Graph()
right_graph.add_nodes_from(partition[1][1])
for u in partition[1][1]:
for v in partition[1][1]:
if processor_graph.has_edge(u,v):
right_graph.add_edge(u,v)
return left_graph,right_graph
def graph_partition(communication_graph):
partition = nxmetis.partition(communication_graph,2,None,None,'weight',None,None,None,True)
# LLP: checking if the partitions have the same size
# if that is not the case, we move items from the biggest to the smallest until equilibrium is met
if(len(partition[1][0]) != len(partition[1][1])):
left_is_smallest = len(partition[1][0]) < len(partition[1][1]) # Gets who is the smallest one
small, big = ((partition[1][1], partition[1][0]), (partition[1][0], partition[1][1]))[left_is_smallest] # calls the smallest as "small"
while len(small) < len(big):
small.append(big.pop(0))
left_graph=nx.Graph()
left_graph.add_nodes_from(partition[1][0])
for u in partition[1][0]:
for v in partition[1][0]:
if task_graph.has_edge(u,v):
val = task_graph.get_edge_data(u,v)['weight']
left_graph.add_edge(u,v,weight=val)
right_graph=nx.Graph()
right_graph.add_nodes_from(partition[1][1])
for u in partition[1][1]:
for v in partition[1][1]:
if task_graph.has_edge(u,v):
val = task_graph.get_edge_data(u,v)['weight']
right_graph.add_edge(u,v,weight=val)
global len_graph_part
len_graph_part = len(left_graph)
return left_graph,right_graph
def bipartitioning_mapping(communication_graph,set_of_nodes):
    """Recursively bisect tasks and nodes in parallel; assign at the leaves.

    When a single node remains, records the task -> (x, y, z) assignment in
    the enclosing ``mapping`` dict and stops; otherwise both the task graph
    and the node set are bisected and the two halves are recursed on.

    NOTE(review): the two leaf branches index the structures differently
    (``set_of_nodes[0]`` for a plain node set vs ``list(set_of_nodes.nodes)[0]``
    for a node graph); ``communication_graph.nodes()[0]`` is not indexable on
    networkx >= 2 -- confirm the intended networkx version. Also relies on
    ``task_graph``, ``mapping``, ``self``, ``graph_node_partition`` and
    ``node_partition`` from an enclosing scope.
    """
    if (len(set_of_nodes)==1):
        if len(task_graph) >= 256:
            mapping[communication_graph.nodes()[0]]=self.topology.to_xyz(set_of_nodes[0]) #if our topology is just a SET = {0,..,63}, for example, of allocated to app nodes
        else:
            mapping[list(communication_graph.nodes)[0]]=self.topology.to_xyz(list(set_of_nodes.nodes)[0]) #if our topology is a graph with edges between each node
        return
    G_1, G_2 = graph_partition(communication_graph)
    if len(task_graph) < 256:
        N_1,N_2 = graph_node_partition(set_of_nodes) #if topology is a set
    else:
        N_1,N_2 = node_partition(set_of_nodes) #if topology is a graph
    bipartitioning_mapping(G_1,N_1)
    bipartitioning_mapping(G_2,N_2)
if len(task_graph) < 256:
bipartitioning_mapping(task_graph,processor_graph) #if topology is a graph
else:
bipartitioning_mapping(task_graph,processors_set) # if topology is a set
return mapping
class Topology_aware(Mapping):
    """Class for the topology aware mapping.

    Greedy constructive heuristic: repeatedly pick the most "critical"
    unmapped task and place it on the processor with the lowest estimated
    communication cost against the tasks placed so far.
    """
    def map(self):
        """Build the task -> (x, y, z) mapping greedily.

        :return: dict mapping each task to node coordinates
        :raises ValueError: if there are more processes than nodes
        """
        mapping={}
        if len(self.process_graph) > len(self.topology):
            raise ValueError("Not applicable for #proc > #nodes")
        task_set = set(self.process_graph.nodes())
        processor_set = {i for i in range(len(self.topology))}
        processor_set_copy = copy.copy(processor_set)
        task_set_closed = set()  # tasks already placed
        N = len(task_set)
        # f_est_min[task] = processor with the lowest estimated cost seen
        # during the most recent criticality() evaluation of that task.
        f_est_min = {}
        def _to_xyz(index,dim):
            # Convert a linear node index into (x, y, z) for a (dx, dy, dz)
            # grid; x varies fastest, then y, then z.
            dx ,dy , dz = dim
            x = (index % (dx*dy)) % dx
            y = (index % (dx*dy)) // dx
            z = index // (dx*dy)
            return (x,y,z)
        def estimation_func(task,processor):#here we calculate a distance as follows:
            #distance among neighborhood is equal to 1, otherwise, we find a manhattan distance between two processors
            # Cost of placing `task` on `processor`: weighted distance to all
            # already-placed communication partners.
            first_approx=0
            #processor_xyz = self.topology.to_xyz(processor)
            processor_xyz = _to_xyz(processor,self.topology.dim)
            for tj in task_set_closed:
                if self.process_graph.has_edge(task,tj):
                    if self.topology.are_neighbors(processor_xyz,mapping[tj]):
                        distance = 1
                    else:
                        distance = manhattan_distance(processor_xyz,mapping[tj])
                    first_approx += self.process_graph.get_edge_data(task,tj)['weight']*distance
            return first_approx
        def estimation_func_second_approx(task,processor):
            # Like estimation_func, plus an average-distance term estimating
            # the cost of edges to tasks that are still unplaced.
            second_approx = 0
            sum_distance = 0
            processor_xyz = self.topology.to_xyz(processor)
            for pj in processor_set_copy:
                pj_xyz = self.topology.to_xyz(pj)
                if self.topology.are_neighbors(processor_xyz,pj_xyz):
                    distance = 1
                else:
                    distance = manhattan_distance(processor_xyz,pj_xyz)
                sum_distance += distance
            for tj in task_set:
                if self.process_graph.has_edge(task,tj):
                    second_approx += self.process_graph.get_edge_data(task,tj)['weight']
            first_approx=0
            for tj in task_set_closed:
                if self.process_graph.has_edge(task,tj):
                    if self.topology.are_neighbors(processor_xyz,mapping[tj]):
                        distance = 1
                    else:
                        distance = manhattan_distance(processor_xyz,mapping[tj])
                    first_approx += self.process_graph.get_edge_data(task,tj)['weight']*distance
            return first_approx+second_approx*sum_distance/N
        def estimation_func_third_approx(task,processor):
            # Coarsest estimate: both placed- and unplaced-edge terms use
            # averaged distances instead of the actual placements.
            second_approx = 0
            sum_distance = 0
            processor_xyz = self.topology.to_xyz(processor)
            for pj in processor_set_copy:
                pj_xyz = self.topology.to_xyz(pj)
                if self.topology.are_neighbors(processor_xyz,pj_xyz):
                    distance = 1
                else:
                    distance = manhattan_distance(processor_xyz,pj_xyz)
                sum_distance += distance
            for tj in task_set:
                if self.process_graph.has_edge(task,tj):
                    second_approx += self.process_graph.get_edge_data(task,tj)['weight']
            first_approx = 0
            sum_third_distance = 0
            for pj in processor_set:
                pj_xyz = self.topology.to_xyz(pj)
                if self.topology.are_neighbors(processor_xyz,pj_xyz):
                    distance = 1
                else:
                    distance = manhattan_distance(processor_xyz,pj_xyz)
                sum_third_distance += distance
            sum_third_distance = sum_third_distance/len(processor_set)
            for tj in task_set_closed:
                if self.process_graph.has_edge(task,tj):
                    first_approx += self.process_graph.get_edge_data(task,tj)['weight']
            return first_approx*sum_third_distance+second_approx*sum_distance/N
        def criticality(task,k):
            # Criticality = average placement cost minus best placement cost;
            # tasks that lose most from a bad placement are mapped first.
            # NOTE(review): divides by the number of remaining *tasks* (N-k);
            # this equals the number of remaining processors only when
            # #procs == #nodes -- confirm.
            min_estimation = float("inf")
            sum_criticality = 0
            for p in processor_set:
                f_est = estimation_func(task,p)#the fastest one
                #f_est = estimation_func_second_approx(task,p)
                #f_est = estimation_func_third_approx(task,p)#the lowest approximation
                if f_est < min_estimation:
                    min_estimation = f_est
                    f_est_min[task] = p
                sum_criticality += f_est
            return sum_criticality/(N-k)-min_estimation
        for k in range(N):
            # Pick the most critical unmapped task and commit it to its best
            # estimated processor (recorded in f_est_min by criticality()).
            max_criticality = -float("inf")
            for task in task_set:
                critical_t = criticality(task,k)
                if critical_t > max_criticality:
                    tk = task
                    max_criticality = critical_t
            pk = f_est_min[tk]
            mapping[tk]=self.topology.to_xyz(pk)
            task_set_closed.add(tk)
            task_set.remove(tk)
            processor_set.remove(pk)
        return mapping
class Random(Mapping):
    """Class for the random sampling mapping.

    Draws 10000 random injective process-to-node assignments and keeps the
    one with the highest cardinality (number of communicating process pairs
    placed on neighboring nodes).
    """
    def map(self):
        """Return the best mapping found among 10000 random samples.

        :return: dict mapping process -> (x, y, z) node coordinates; may be
            empty if every sampled mapping has cardinality 0.
        """
        best = 0
        mapping = {}
        num_nodes, num_procs = len(self.topology), len(self.process_graph)
        for _ in range(10000):
            current = {process: self.topology.to_xyz(node)
                       for process, node in enumerate(sample(range(num_nodes), num_procs))}
            # Evaluate the objective once per candidate (the original code
            # called self.cardinality() twice whenever a new best was found).
            score = self.cardinality(current)
            if score > best:
                mapping, best = current, score
        return mapping
class MinimumManhattanDistance(Mapping):
    """Class for the minimum manhattan distance mapping.

    Processes pairs of communicating processes in order of importance and
    places each process as close as possible to its already-placed partner,
    searching outward from the partner's node.
    """
    def map(self):
        """Greedily place communicating process pairs on nearby nodes.

        :return: dict mapping process -> (x, y, z) node coordinates
        """
        mapping = {}
        def helper(edge):
            """
            Helper function for sorting of process pairs.
            :param edge: Process pair
            :return: Sorting value

            NOTE(review): returns a bare number when the edge has a 'weight'
            attribute, but a tuple otherwise -- in Python 3 sorting a mix of
            both raises TypeError, so all edges must consistently carry (or
            lack) 'weight'. Confirm against the process-graph builder.
            """
            _, _, data = edge
            if 'weight' in data:
                return data['weight']
            else:
                # HAEC Communications Models (Internal Documentation)
                # 2.6 Influence of Message Size on Transfer Time
                if data['size']/data['count'] > 7295:
                    return (0, data['size'])
                else:
                    return (1, data['size'])
        # NOTE(review): the list is sorted descending but consumed with
        # pop() from the *end*, i.e. smallest sort key first -- confirm this
        # ordering is intended.
        edge_list = sorted(self.process_graph.edges(data=True),
                           key=helper, reverse=True)
        # Start placing from the geometric center of the topology.
        start_node = tuple((i // 2 for i in self.topology.dim))
        # procs_on_node[(x, y, z)] = number of processes placed on that node.
        procs_on_node = {}
        def bfs(root):
            """
            Breadth-first search for a free node
            :param root: Start node
            :return: Next free node

            Nodes are considered "full" once they hold max_procs processes;
            if no node within reach is free, max_procs is raised and the
            search restarts, so several processes may share a node.
            NOTE(review): _open.pop() pops the RIGHT end of the deque, so
            despite the name this explores depth-first -- confirm intent
            (popleft() would make it breadth-first).
            """
            max_procs = 1
            def is_full(pos):
                return procs_on_node.get(pos, 0) >= max_procs
            while True:
                if not is_full(root):
                    procs_on_node[root] = procs_on_node.get(root, 0) + 1
                    return root
                _open = deque([root])
                closed = set([root])
                while len(_open) > 0:
                    x, y, z = _open.pop()
                    for neighbour in self.topology.neighbours(x, y, z):
                        if not is_full(neighbour):
                            procs_on_node[neighbour] = procs_on_node.get(neighbour, 0) + 1
                            return neighbour
                        if neighbour not in closed:
                            closed.add(neighbour)
                            _open.append(neighbour)
                max_procs += 1 # increase max_proc if no place is found
        while len(edge_list) > 0:
            sender, receiver, weight = edge_list.pop()
            if sender in mapping:
                if receiver in mapping:
                    continue  # both endpoints already placed
                mapping[receiver] = bfs(mapping[sender])
            elif receiver in mapping:
                mapping[sender] = bfs(mapping[receiver])
            else:
                # Neither endpoint placed yet: anchor the pair at the center.
                mapping[sender] = bfs(start_node)
                mapping[receiver]= bfs(mapping[sender])
        return mapping
class SpaceFillingCurve(Mapping):
    """Base class for space-filling-curve (SFC) mappings.

    Subclasses implement :meth:`path`, a generator that yields (x, y, z)
    node coordinates in curve order; :meth:`map` walks that curve and
    assigns processes (optionally grouped into blocks) to nodes.
    """
    def map(self):
        """Map processes onto nodes by walking the space-filling curve.

        :return: dict mapping process -> (x, y, z) node coordinates
        """
        mapping = {}
        spc_path = self.path(*self.topology.size)
        process_list = self.process_list(self.process_graph, self.topology, self.block)
        for sublist in process_list:
            try:
                coord = next(spc_path)
            except StopIteration:
                # More process groups than nodes: restart the curve and wrap
                # around, so several groups land on the same nodes.
                spc_path = self.path(*self.topology.size)
                coord = next(spc_path)
            for proc in sublist:
                mapping[proc] = coord
        return mapping
    def split_evenly(self, _list, num_bins):
        """
        Split a list into evenly sized number of bins.
        The first ``len(_list) % num_bins`` bins receive one extra element.
        :param _list: List (anything sliceable)
        :param num_bins: Number of bins
        :return: Generator yielding the bins as slices
        """
        index, count = 0, len(_list)
        bin_size, modulo = count // num_bins, count % num_bins
        for i in range(num_bins):
            step = bin_size + 1 if i < modulo else bin_size
            yield _list[index:index+step]
            index += step
    def process_list(self, process_graph, topology, block):
        """
        Returns the processes in the process graph either grouped or alone.
        :param process_graph: Process graph
        :param topology: Topology
        :param block: If true, group processes evenly over the nodes
        :return: Iterable of process groups
        """
        process_list = range(len(process_graph))
        if block:
            return self.split_evenly(process_list, len(topology))
        else:
            return [[proc] for proc in process_list]
    def check_dimensions(self, dx, dy, dz):
        """
        Checks if dimensions are appropriate for SFC.
        :param dx: Number of nodes in x direction
        :param dy: Number of nodes in y direction
        :param dz: Number of nodes in z direction
        :raises ValueError: If dimensions are unequal or not powers of 2
        """
        def is_power_of_2(num):
            return (num & (num-1)) == 0 and num != 0
        if dx != dy or dx != dz:
            raise ValueError("Mapping works only for equal dx, dy, dz.")
        # dx == dy == dz is guaranteed above, so checking dx alone suffices.
        if not is_power_of_2(dx):
            # Fixed the broken message text ("are a powers of 2").
            raise ValueError("Mapping works only if dx, dy, dz are powers of 2.")
class Sweep(SpaceFillingCurve):
    """Sweep space-filling curve: a plain plane-by-plane raster scan."""
    def path(self, dx, dy, dz):
        """Generate the Sweep SFC coordinates.

        Visits nodes in row-major order: x varies fastest, then y, then z.
        :param dx: Number of nodes in x direction
        :param dy: Number of nodes in y direction
        :param dz: Number of nodes in z direction
        :return: Generator yielding (x, y, z) tuples
        """
        for z, y, x in product(range(dz), range(dy), range(dx)):
            yield (x, y, z)
class Scan(SpaceFillingCurve):
    """Scan space-filling curve: a boustrophedon (snake-like) raster scan."""
    def path(self, dx, dy, dz):
        """Generate the Scan SFC coordinates.

        Like Sweep, but the traversal direction alternates after every row
        and after every plane, so consecutive nodes stay adjacent.
        :param dx: Number of nodes in x direction
        :param dy: Number of nodes in y direction
        :param dz: Number of nodes in z direction
        :return: Generator yielding (x, y, z) tuples
        """
        reverse_x, reverse_y = False, False
        for z in range(dz):
            rows = reversed(range(dy)) if reverse_y else range(dy)
            for y in rows:
                cols = reversed(range(dx)) if reverse_x else range(dx)
                for x in cols:
                    yield (x, y, z)
                reverse_x = not reverse_x
            reverse_y = not reverse_y
# Added later: Diagonal SFC (not part of the original curve set).
class Diagonal(SpaceFillingCurve):
    """Class for the Diagonal space-filling curve (zig zag)"""
    def path(self, dx, dy, dz, direction=True):
        """Returns a generator for the Diagonal SFC
        :param dx: Number of nodes in x direction
        :param dy: Number of nodes in y direction
        :param dz: Number of nodes in z direction
        :param direction: A direction of a zigzag line-
        True: first node after start (0,0,0) position is on Oy axe
        False: first node after start (0,0,0) position is on Ox axe
        :return: Diagonal SFC generator
        """
        for z_coord in range(dz):
            # Each z-plane is traversed independently along anti-diagonals,
            # bouncing off the plane's borders.
            x_coord,y_coord = 0,0
            dimension = dx*dy  # nodes left to emit in this plane
            while True:
                # Parity of the current anti-diagonal decides the direction.
                even = ((x_coord+y_coord)%2 == 0)
                yield (x_coord,y_coord,z_coord)
                if (even == direction):
                    # Move down-left; on hitting the bottom or left border,
                    # step onto the next anti-diagonal instead.
                    x_coord-=1
                    y_coord+=1
                    if (y_coord==dy):
                        y_coord-=1
                        x_coord+=2
                    if (x_coord<0):
                        x_coord=0
                else:
                    # Move up-right; mirror-image border handling.
                    x_coord+=1
                    y_coord-=1
                    if (x_coord==dx):
                        x_coord-=1
                        y_coord+=2
                    if (y_coord<0):
                        y_coord=0
                dimension-=1
                if (dimension<=0):
                    break
class Peano(SpaceFillingCurve):
    """Class for the Peano space-filling curve"""
    def path(self, dx, dy, dz):
        """Returns a generator for the Peano SFC
        :param dx: Number of nodes in x direction
        :param dy: Number of nodes in y direction
        :param dz: Number of nodes in z direction
        :return: Peano SFC generator

        Recursively subdivides the cube spanned by vectors u, v, w into 8
        octants and yields them in Z-order (Morton-like) sequence.
        NOTE(review): relies on the project's ``Vector`` type providing
        ``norm``, ``value``, ``+`` , ``-`` and integer halving via ``//`` --
        semantics not visible here, confirm against its definition.
        """
        self.check_dimensions(dx, dy, dz)
        def peano(o, u, v, w):
            # Base case: the spanned box has collapsed to a single node.
            if u.norm < 1 and v.norm < 1 and w.norm < 1:
                yield o.value
            else:
                for p in [peano(o, u//2, v//2, w//2),
                          peano(o+u-u//2, u//2, v//2, w//2),
                          peano(o+v-v//2, u//2, v//2, w//2),
                          peano(o+u-u//2+v-v//2, u//2, v//2, w//2),
                          peano(o+w-w//2, u//2, v//2, w//2),
                          peano(o+u-u//2+w-w//2, u//2, v//2, w//2),
                          peano(o+v-v//2+w-w//2, u//2, v//2, w//2),
                          peano(o+u-u//2+v-v//2+w-w//2, u//2, v//2, w//2)]:
                    for i in p:
                        yield i
        # Origin plus the three spanning vectors of the full grid.
        o = Vector(0, 0, 0)
        u = Vector(dx-1, 0, 0)
        v = Vector(0, dy-1, 0)
        w = Vector(0, 0, dz-1)
        return peano(o, u, v, w)
class Gray(SpaceFillingCurve):
    """Class for the Gray space-filling curve"""
    def path(self, dx, dy, dz):
        """Returns a generator for the Gray SFC
        :param dx: Number of nodes in x direction
        :param dy: Number of nodes in y direction
        :param dz: Number of nodes in z direction
        :return: Gray SFC generator

        Recursively subdivides the cube into 8 octants visited in a
        Gray-code order; the negated/offset spanning vectors mirror the
        sub-curves so consecutive octants connect.
        NOTE(review): depends on the project's ``Vector`` arithmetic
        (``norm``, ``value``, ``+``, ``-``, unary ``-``, ``//``) -- confirm
        against its definition.
        """
        self.check_dimensions(dx, dy, dz)
        def gray(o, u, v, w):
            # Base case: box collapsed to a single node.
            if u.norm < 1 and v.norm < 1 and w.norm < 1:
                yield o.value
            else:
                for g in [gray(o, u//2, v//2, w//2),
                          gray(o+u+v//2, -u//2, -v//2, w//2),
                          gray(o+u+w, -u//2, v//2, -w//2),
                          gray(o+v//2+w, u//2, -v//2, -w//2),
                          gray(o+v+w, u//2, -v//2, -w//2),
                          gray(o+u+v-v//2+w, -u//2, v//2, -w//2),
                          gray(o+u+v, -u//2, -v//2, w//2),
                          gray(o+v-v//2, u//2, v//2, w//2)]:
                    for i in g:
                        yield i
        # Origin plus the three spanning vectors of the full grid.
        o = Vector(0, 0, 0)
        u = Vector(dx-1, 0, 0)
        v = Vector(0, dy-1, 0)
        w = Vector(0, 0, dz-1)
        return gray(o, u, v, w)
class Hilbert(SpaceFillingCurve):
    """Class for the Hilbert space-filling curve"""
    def path(self, dx, dy, dz):
        """
        Returns a generator for the Hilbert SFC
        :param dx: Number of nodes in x direction
        :param dy: Number of nodes in y direction
        :param dz: Number of nodes in z direction
        :return: Hilbert SFC generator
        """
        """after ref08.pdf, page 110, Fig. 8.2, right
        """
        # Recursive 3D Hilbert construction: each octant gets a rotated /
        # reflected copy of the curve (the permuted and negated spanning
        # vectors encode that orientation).
        # NOTE(review): depends on the project's ``Vector`` arithmetic --
        # confirm against its definition.
        self.check_dimensions(dx, dy, dz)
        def hilbert(o, u, v, w):
            # Base case: box collapsed to a single node.
            if u.norm < 1 and v.norm < 1 and w.norm < 1:
                yield o.value
            else:
                for h in [hilbert(o, w//2, u//2, v//2),
                          hilbert(o+v-v//2, v//2, w//2, u//2),
                          hilbert(o+u-u//2+v-v//2, v//2,w//2,u//2),
                          hilbert(o+u+v//2, -u//2, -v//2, w//2),
                          hilbert(o+u+v//2+w-w//2, -u//2,-v//2,w//2),
                          hilbert(o+u+v-v//2+w, v//2, -w//2, -u//2),
                          hilbert(o+u//2+v-v//2+w, v//2,-w//2,-u//2),
                          hilbert(o+v//2+w, -w//2, u//2, -v//2)]:
                    for i in h:
                        yield i
        # Origin plus the three spanning vectors of the full grid.
        o = Vector(0, 0, 0)
        u = Vector(dx-1, 0, 0)
        v = Vector(0, dy-1, 0)
        w = Vector(0, 0, dz-1)
        return hilbert(o, u, v, w)
import time
class Bokhari(Mapping):
    """Class which represents Bokhari's mapping.

    Hill climbing over pairwise swaps of process placements, with random
    "jumps" between climbing phases to escape local optima.
    """
    def get_initial_mapping(self, dim):
        """
        Returns an initial mapping (Sweep SFC) for the algorithm
        :param dim: Dimensions of topology
        :return: Mapping of process -> (x, y, z)
        """
        dx, dy, dz = dim
        return {i: (x, y, z) for i, (x, y, z)
                in enumerate(product(range(dx), range(dy), range(dz)))}
    def map(self):
        """Run the swap/jump search and return the best mapping found.

        :return: Mapping of process -> (x, y, z)
        :raises ValueError: If there are more processes than nodes
        """
        if len(self.process_graph) > len(self.topology):
            raise ValueError("Not applicable for #proc > #nodes")
        mapping = self.get_initial_mapping(self.topology.dim)
        best, done = mapping.copy(), False
        self.logger.info('Initial cardinality: %d' % self.cardinality(mapping))
        while not done:
            count = 0
            # Climb: keep augmenting until no swap improves the mapping.
            while True:
                augmented = self.augment(mapping)
                self.logger.info('Cardinality - Round %d: %d' % (count, self.cardinality(mapping)))
                if not augmented:
                    break
                count += 1
            if self.cardinality(mapping) < self.cardinality(best):
                # The last jump made things worse: stop and keep `best`.
                done = True
            else:
                best = mapping.copy()
                self.jump(mapping)
        self.logger.info('Cardinality best: %d' % self.cardinality(best))
        return best
    def gain(self, u, v, old, new):
        """
        Returns the gain of the swap of two processes
        :param u: Node u
        :param v: Node v
        :param old: Old mapping
        :param new: New mapping
        :return: Gain in cardinality
        """
        # Materialize the neighbor views: on networkx >= 2, neighbors()
        # returns a lazy iterator, and the original code iterated each one
        # twice -- the second pass saw an exhausted iterator and always
        # counted 0, so no swap could ever show a positive gain.
        u_neighbors = list(self.process_graph.neighbors(u))
        v_neighbors = list(self.process_graph.neighbors(v))
        old_card = sum([1 for i in u_neighbors if self.topology.are_neighbors(old[u], old[i])]) + sum([1 for j in v_neighbors if self.topology.are_neighbors(old[v], old[j])])
        new_card = sum([1 for i in u_neighbors if self.topology.are_neighbors(new[u], new[i])]) + sum([1 for j in v_neighbors if self.topology.are_neighbors(new[v], new[j])])
        return new_card - old_card
    def augment(self, mapping):
        """
        Augments mapping based on pairwise swaps (modifies it in place)
        :param mapping: Initial mapping
        :return: True if at least one swap had a strictly positive gain
        """
        improved = False
        for i, u in enumerate(self.process_graph):
            if i % 100 == 0:
                self.logger.info('Augment u: %d' % i)
            ex_pair, max_gain = None, 0
            for j, v in enumerate(self.process_graph):
                if u == v:
                    continue
                # Copy only when a swap is actually evaluated (the original
                # copied the dict even for the skipped u == v case).
                tmp_mapping = mapping.copy()
                tmp_mapping[u], tmp_mapping[v] = tmp_mapping[v], tmp_mapping[u]
                gain = self.gain(u, v, mapping, tmp_mapping)
                if gain >= max_gain:
                    ex_pair, max_gain = (u, v), gain
            if max_gain > 0:
                improved = True
            # Zero-gain swaps are still applied (gain >= max_gain above),
            # matching the original behavior; they just don't count as an
            # improvement.
            if ex_pair is not None:
                u, v = ex_pair
                mapping[u], mapping[v] = mapping[v], mapping[u]
        return improved
    def jump(self, mapping):
        """Randomly swap a few placements to escape a local optimum."""
        for i in range(max(self.topology.dim)):
            u, v = sample(range(len(self.process_graph)), 2)
            mapping[u], mapping[v] = mapping[v], mapping[u]
class StateSpaceSearch(Mapping):
    """
    Defines a base class and methods for a state-space search
    (best-first search over partial mappings, ordered by f = g + h).
    NOTE(review): the cost functions ``g``/``h`` and the successor
    generator ``succ`` are not defined in this class -- presumably
    supplied by subclasses; confirm.
    """
    def map(self):
        # Best-first search: the heap orders SearchNodes via
        # SearchNode.__lt__ (f = g + h, ties broken by smaller h).
        _open, closed = [self.SearchNode(self, dict())], set()
        while len(_open) != 0:
            node = heappop(_open)
            if node not in closed:
                closed.add(node)
                if self.is_goal(node):
                    return node.mapping
                for s in self.succ(node):
                    heappush(_open, s)
        raise ValueError("StateSpaceSearch: Not found a solution")
    class SearchNode():
        # Wraps a partial mapping together with the outer search object so
        # heap comparisons can call the outer g/h cost functions.
        def __init__(self, outer, mapping):
            self.outer = outer
            self.mapping = mapping
        def __eq__(self, other):
            return set(self.mapping.items()) == set(other.mapping.items())
        def __lt__(self, other):
            g_self, g_other = self.outer.g(self.mapping), self.outer.g(other.mapping)
            h_self, h_other = self.outer.h(self.mapping), self.outer.h(other.mapping)
            if g_self + h_self == g_other + h_other:
                # Equal f-values: prefer the node estimated closer to goal.
                return h_self < h_other
            else:
                return g_self + h_self < g_other + h_other
        def __hash__(self):
            return hash(frozenset(self.mapping.items()))
    def md(self, u, v):
        # Manhattan distance between two coordinate tuples.
        return sum([abs(i - j) for i, j in zip(u, v)])
    def is_goal(self, node):
        # Goal: every process in the process graph has been assigned.
        return set(self.process_graph) == set(node.mapping)
class Genetic(Mapping):
    """Genetic-algorithm mapping.

    Candidate solutions ("strings") are permutations of node indices;
    position i of a string is the node assigned to process i. Fitness is
    the number of communicating process pairs placed on neighboring nodes.
    """
    population_size = 100  # candidates kept per generation
    generations = 10       # number of evolution rounds in map()
    def fitness(self, string):
        """
        Returns the fitness of a string.
        :param string: String
        :return: Fitness
        """
        _sum = 0
        for u, v in self.process_graph.edges():
            cords_u = self.topology.to_xyz(string[u])
            cords_v = self.topology.to_xyz(string[v])
            if self.topology.are_neighbors(cords_u, cords_v):
                _sum += 1
        return _sum
    def selection(self, population):
        """
        Selects a candidate in the population (roulette wheel selection)
        After: http://stackoverflow.com/questions/10324015/fitness-proportionate-selection-roulette-wheel-selection-in-python
        :param population: Candidate list
        :return: Candidate

        NOTE(review): falls through (returning None) if no cumulative
        fitness exceeds the pick, e.g. when every fitness is 0 -- confirm
        callers tolerate that.
        """
        _sum = sum(population.values())
        pick = uniform(0, _sum)
        current = 0
        for string, fitness in population.items():
            current += fitness
            if current > pick:
                return string
    def crossover(self, a, b):
        """
        Executes an ordered crossover
        After: Wonjae Lee and Hak-Young Kim, "Genetic algorithm implementation in Python," Fourth Annual ACIS International Conference on Computer and Information Science (ICIS'05), 2005, pp. 8-11.
        :param a: Parent
        :param b: Parent
        :return: Offspring
        """
        a, b = list(a), list(b)
        # Pick a random slice [p:q) to exchange between the parents.
        p, q = sample(range(1, len(self.topology)-1), 2)
        p, q = min(p, q), max(p, q)
        cross_a, cross_b = a[p:q], b[p:q]
        # Rotate the parents and drop elements already taken by the
        # exchanged slice so each child stays a permutation.
        remain_a, remain_b = a[q:] + a[:q], b[q:] + b[:q]
        filter_a = [i for i in remain_a if i not in cross_b]
        filter_b = [i for i in remain_b if i not in cross_a]
        c = filter_a[-p:] + cross_b + filter_a[:len(a)-q]
        d = filter_b[-p:] + cross_a + filter_b[:len(b)-q]
        return tuple(c), tuple(d)
    def mutation(self, a):
        """
        Implements a mutation in a string (swap of two random positions)
        :param a: String
        :return: Mutated string as a tuple
        """
        a = list(a)
        p = randint(0, len(self.topology) - 1)
        q = randint(0, len(self.topology) - 1)
        a[p], a[q] = a[q], a[p]
        return tuple(a)
    def create_generation(self, population):
        """
        Creates a new generation based on a population
        :param population: Population
        :return: Generation

        Offspring are only admitted if strictly fitter than both parents;
        the loop also stops after a 10800 s (3 h) budget measured from
        self.t, which map() sets before evolving.
        """
        generation, size = {}, len(population)
        while len(generation) < size and (time.time()-self.t < 10800):
            a, b = self.selection(population), self.selection(population)
            c, d = self.crossover(a, b)
            c = self.mutation(c)
            d = self.mutation(d)
            fit_a, fit_b = self.fitness(a), self.fitness(b)
            fit_c, fit_d = self.fitness(c), self.fitness(d)
            if max(fit_a, fit_b) < fit_c:
                generation[c] = fit_c
            if max(fit_a, fit_b) < fit_d:
                generation[d] = fit_d
        return generation
    def initial_population(self):
        # Random permutations of all node indices, keyed by string with
        # their fitness as value.
        num_nodes = len(self.topology)
        population = {}
        for _ in range(self.population_size):
            s = sample(range(num_nodes), num_nodes)
            population[tuple(s)] = self.fitness(s)
        return population
    def to_mapping(self, string):
        # Convert a permutation string into the process -> (x, y, z) dict.
        return {process: self.topology.to_xyz(node) for process, node in enumerate(string)}
    def map(self):
        """Evolve the population and return the fittest mapping found."""
        population = self.initial_population()
        self.t = time.time()  # start of the time budget for create_generation
        fittest = max(population, key=population.get)
        print(self.fitness(fittest))  # debug output
        for i in range(self.generations):
            print(i)  # debug output
            generation = self.create_generation(population)
            if generation:
                # Only replace the population if any offspring survived.
                population = generation
                fittest = max(population, key=population.get)
                print(self.fitness(fittest))  # debug output
        return self.to_mapping(fittest)
| 32.312007 | 191 | 0.675952 |
54b3ea4921a9943aa7dc6ef93b12b518622432b0 | 94 | py | Python | apps/bouygue/apps.py | GuillaumeM92/La-Bouygue | a402efbc9746acb51cd7fc66ccdac4a45b854a22 | [
"MIT"
] | null | null | null | apps/bouygue/apps.py | GuillaumeM92/La-Bouygue | a402efbc9746acb51cd7fc66ccdac4a45b854a22 | [
"MIT"
] | null | null | null | apps/bouygue/apps.py | GuillaumeM92/La-Bouygue | a402efbc9746acb51cd7fc66ccdac4a45b854a22 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class BouygueConfig(AppConfig):
    """Django AppConfig for the ``apps.bouygue`` application."""
    name = 'apps.bouygue'
| 15.666667 | 33 | 0.755319 |
c3c453b01f0596be0e274d3c915ed656e2d4a665 | 968 | py | Python | demos/marker.py | eliasdjo/PhiFlow | dc88dca696d25a5ea5793aa48fae390469f0d829 | [
"MIT"
] | null | null | null | demos/marker.py | eliasdjo/PhiFlow | dc88dca696d25a5ea5793aa48fae390469f0d829 | [
"MIT"
] | null | null | null | demos/marker.py | eliasdjo/PhiFlow | dc88dca696d25a5ea5793aa48fae390469f0d829 | [
"MIT"
] | null | null | null | """ Passive Markers
Fluid simulation with additional marker fields that are passively transported with the fluid.
The dense marker is sampled on a regular grid while the sparse marker is a collection of particles.
"""
from phi.flow import *
# PhiFlow demo: incompressible flow carrying two passive markers
# (a dense grid marker and a sparse particle marker).
DOMAIN = dict(x=64, y=64, bounds=Box(x=100, y=100))
DT = 0.2  # simulation time step
# 8x8 grid of marker seed points, scaled/offset into the domain interior.
INITIAL_LOC = math.meshgrid(x=8, y=8).pack('x,y', instance('points')) * 10. + 10.
velocity = StaggeredGrid(Noise(vector=2, scale=100), 0, **DOMAIN) * 4
sparse_marker = PointCloud(Sphere(INITIAL_LOC, 2), 1, 0, bounds=DOMAIN['bounds'])
# Dense marker sampled on a finer (200x200) grid than the velocity field.
dense_marker = CenteredGrid(sparse_marker.elements, extrapolation.BOUNDARY, x=200, y=200, bounds=DOMAIN['bounds'])
for _ in view(framerate=10, play=False, namespace=globals()).range():
    velocity, _ = fluid.make_incompressible(velocity)
    # Markers are advected passively; they do not influence the flow.
    dense_marker = advect.advect(dense_marker, velocity, DT)
    sparse_marker = advect.advect(sparse_marker, velocity, DT)
    velocity = advect.semi_lagrangian(velocity, velocity, DT)
| 40.333333 | 114 | 0.738636 |
6b78573564829886ae8f0ed3eafd5d405767f109 | 43,055 | py | Python | test/vanilla/Expected/AcceptanceTests/RequiredOptional/requiredoptional/aio/operations_async/_explicit_operations_async.py | daviwil/autorest.python | 20870e3870fcfeae9567b63343d2320bf388f3c6 | [
"MIT"
] | null | null | null | test/vanilla/Expected/AcceptanceTests/RequiredOptional/requiredoptional/aio/operations_async/_explicit_operations_async.py | daviwil/autorest.python | 20870e3870fcfeae9567b63343d2320bf388f3c6 | [
"MIT"
] | null | null | null | test/vanilla/Expected/AcceptanceTests/RequiredOptional/requiredoptional/aio/operations_async/_explicit_operations_async.py | daviwil/autorest.python | 20870e3870fcfeae9567b63343d2320bf388f3c6 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from ... import models
class ExplicitOperations:
    """ExplicitOperations async operations.
    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Alias so the generated operations can reference model types.
    models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # NOTE: generated by AutoRest -- manual edits are lost on regeneration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config
    async def post_required_integer_parameter(
            self, body_parameter, *, custom_headers=None, raw=False, **operation_config):
        """Test explicitly required integer. Please put null and the client
        library should throw before the request is sent.
        :param body_parameter:
        :type body_parameter: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorException<requiredoptional.models.ErrorException>`
        """
        # Construct URL
        url = self.post_required_integer_parameter.metadata['url']
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct body
        # NOTE(review): msrest's serializer is expected to raise here for a
        # missing required value, before any request is sent -- confirm.
        body_content = self._serialize.body(body_parameter, 'int')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = await self._client.async_send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    # "requied" (sic) is the route the generator emitted; do not "fix" it
    # without the service contract.
    post_required_integer_parameter.metadata = {'url': '/reqopt/requied/integer/parameter'}
    async def post_optional_integer_parameter(
            self, body_parameter=None, *, custom_headers=None, raw=False, **operation_config):
        """Test explicitly optional integer. Please put null.
        :param body_parameter:
        :type body_parameter: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorException<requiredoptional.models.ErrorException>`
        """
        # Construct URL
        url = self.post_optional_integer_parameter.metadata['url']
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct body
        # Optional parameter: an omitted value produces a request with no body.
        if body_parameter is not None:
            body_content = self._serialize.body(body_parameter, 'int')
        else:
            body_content = None
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = await self._client.async_send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    post_optional_integer_parameter.metadata = {'url': '/reqopt/optional/integer/parameter'}
    async def post_required_integer_property(
            self, value, *, custom_headers=None, raw=False, **operation_config):
        """Test explicitly required integer. Please put a valid int-wrapper with
        'value' = null and the client library should throw before the request
        is sent.
        :param value:
        :type value: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorException<requiredoptional.models.ErrorException>`
        """
        # Flattened client parameter is re-wrapped into the wire model.
        body_parameter = models.IntWrapper(value=value)
        # Construct URL
        url = self.post_required_integer_property.metadata['url']
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct body
        body_content = self._serialize.body(body_parameter, 'IntWrapper')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = await self._client.async_send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    # "requied" (sic) is the route the generator emitted.
    post_required_integer_property.metadata = {'url': '/reqopt/requied/integer/property'}
    async def post_optional_integer_property(
            self, value=None, *, custom_headers=None, raw=False, **operation_config):
        """Test explicitly optional integer. Please put a valid int-wrapper with
        'value' = null.
        :param value:
        :type value: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorException<requiredoptional.models.ErrorException>`
        """
        # Wrap only when a value was supplied; otherwise send no body.
        body_parameter = None
        if value is not None:
            body_parameter = models.IntOptionalWrapper(value=value)
        # Construct URL
        url = self.post_optional_integer_property.metadata['url']
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct body
        if body_parameter is not None:
            body_content = self._serialize.body(body_parameter, 'IntOptionalWrapper')
        else:
            body_content = None
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = await self._client.async_send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    post_optional_integer_property.metadata = {'url': '/reqopt/optional/integer/property'}
    async def post_required_integer_header(
            self, header_parameter, *, custom_headers=None, raw=False, **operation_config):
        """Test explicitly required integer. Please put a header 'headerParameter'
        => null and the client library should throw before the request is sent.
        :param header_parameter:
        :type header_parameter: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorException<requiredoptional.models.ErrorException>`
        """
        # Construct URL
        url = self.post_required_integer_header.metadata['url']
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        if custom_headers:
            header_parameters.update(custom_headers)
        # Required value travels as a header; the request has no body.
        header_parameters['headerParameter'] = self._serialize.header("header_parameter", header_parameter, 'int')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters)
        response = await self._client.async_send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    # "requied" (sic) is the route the generator emitted.
    post_required_integer_header.metadata = {'url': '/reqopt/requied/integer/header'}
    async def post_optional_integer_header(
            self, header_parameter=None, *, custom_headers=None, raw=False, **operation_config):
        """Test explicitly optional integer. Please put a header 'headerParameter'
        => null.
        :param header_parameter:
        :type header_parameter: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorException<requiredoptional.models.ErrorException>`
        """
        # Construct URL
        url = self.post_optional_integer_header.metadata['url']
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        if custom_headers:
            header_parameters.update(custom_headers)
        # Optional header: omitted entirely when no value is supplied.
        if header_parameter is not None:
            header_parameters['headerParameter'] = self._serialize.header("header_parameter", header_parameter, 'int')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters)
        response = await self._client.async_send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    post_optional_integer_header.metadata = {'url': '/reqopt/optional/integer/header'}
async def post_required_string_parameter(
        self, body_parameter, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly required string body test: passing null must fail
    client-side before any request is sent.

    :param body_parameter: required string payload
    :type body_parameter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    target_url = self.post_required_string_parameter.metadata['url']
    query = {}
    # JSON body, so advertise the content type up front.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # Serializer raises before the wire call when the required value is null.
    payload = self._serialize.body(body_parameter, 'str')
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_required_string_parameter.metadata = {'url': '/reqopt/requied/string/parameter'}
async def post_optional_string_parameter(
        self, body_parameter=None, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly optional string body test: null is a valid payload.

    :param body_parameter: optional string payload
    :type body_parameter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    target_url = self.post_optional_string_parameter.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # A null body is sent as an empty request rather than serialized.
    payload = (
        None if body_parameter is None
        else self._serialize.body(body_parameter, 'str')
    )
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_optional_string_parameter.metadata = {'url': '/reqopt/optional/string/parameter'}
async def post_required_string_property(
        self, value, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly required string property test: a StringWrapper whose
    'value' is null must fail client-side before the request is sent.

    :param value: required wrapped string
    :type value: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    # Wrap the flat argument into the model the service expects.
    wrapper = models.StringWrapper(value=value)
    target_url = self.post_required_string_property.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    payload = self._serialize.body(wrapper, 'StringWrapper')
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_required_string_property.metadata = {'url': '/reqopt/requied/string/property'}
async def post_optional_string_property(
        self, value=None, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly optional string property test: a StringOptionalWrapper
    with 'value' = null is valid.

    :param value: optional wrapped string
    :type value: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    # Only build the wrapper model when there is a value to wrap.
    wrapper = models.StringOptionalWrapper(value=value) if value is not None else None
    target_url = self.post_optional_string_property.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    payload = (
        None if wrapper is None
        else self._serialize.body(wrapper, 'StringOptionalWrapper')
    )
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_optional_string_property.metadata = {'url': '/reqopt/optional/string/property'}
async def post_required_string_header(
        self, header_parameter, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly required string header test: a null 'headerParameter'
    must fail client-side before the request is sent.

    :param header_parameter: required value sent as 'headerParameter'
    :type header_parameter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    target_url = self.post_required_string_header.metadata['url']
    query = {}
    headers = {}
    if custom_headers:
        headers.update(custom_headers)
    # Required header: serialization raises when the value is null.
    headers['headerParameter'] = self._serialize.header(
        "header_parameter", header_parameter, 'str')
    http_request = self._client.post(target_url, query, headers)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_required_string_header.metadata = {'url': '/reqopt/requied/string/header'}
async def post_optional_string_header(
        self, body_parameter=None, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly optional string header test: the header may be omitted
    entirely (null).

    :param body_parameter: optional header value
    :type body_parameter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    target_url = self.post_optional_string_header.metadata['url']
    query = {}
    headers = {}
    if custom_headers:
        headers.update(custom_headers)
    # NOTE(review): the wire header is 'bodyParameter' (mirroring the
    # swagger parameter name), not 'headerParameter' — presumably
    # intentional in the test spec; confirm against the service.
    if body_parameter is not None:
        headers['bodyParameter'] = self._serialize.header(
            "body_parameter", body_parameter, 'str')
    http_request = self._client.post(target_url, query, headers)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_optional_string_header.metadata = {'url': '/reqopt/optional/string/header'}
async def post_required_class_parameter(
        self, body_parameter, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly required complex-object body test: passing null must
    fail client-side before the request is sent.

    :param body_parameter: required Product payload
    :type body_parameter: ~requiredoptional.models.Product
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    target_url = self.post_required_class_parameter.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # Serializer raises before the wire call when the required value is null.
    payload = self._serialize.body(body_parameter, 'Product')
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_required_class_parameter.metadata = {'url': '/reqopt/requied/class/parameter'}
async def post_optional_class_parameter(
        self, body_parameter=None, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly optional complex-object body test: null is a valid
    payload.

    :param body_parameter: optional Product payload
    :type body_parameter: ~requiredoptional.models.Product
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    target_url = self.post_optional_class_parameter.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # A null body is sent as an empty request rather than serialized.
    payload = (
        None if body_parameter is None
        else self._serialize.body(body_parameter, 'Product')
    )
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_optional_class_parameter.metadata = {'url': '/reqopt/optional/class/parameter'}
async def post_required_class_property(
        self, value, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly required complex-object property test: a ClassWrapper
    whose 'value' is null must fail client-side before the request is
    sent.

    :param value: required wrapped Product
    :type value: ~requiredoptional.models.Product
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    # Wrap the flat argument into the model the service expects.
    wrapper = models.ClassWrapper(value=value)
    target_url = self.post_required_class_property.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    payload = self._serialize.body(wrapper, 'ClassWrapper')
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_required_class_property.metadata = {'url': '/reqopt/requied/class/property'}
async def post_optional_class_property(
        self, value=None, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly optional complex-object property test: a
    ClassOptionalWrapper with 'value' = null is valid.

    :param value: optional wrapped Product
    :type value: ~requiredoptional.models.Product
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    # Only build the wrapper model when there is a value to wrap.
    wrapper = models.ClassOptionalWrapper(value=value) if value is not None else None
    target_url = self.post_optional_class_property.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    payload = (
        None if wrapper is None
        else self._serialize.body(wrapper, 'ClassOptionalWrapper')
    )
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_optional_class_property.metadata = {'url': '/reqopt/optional/class/property'}
async def post_required_array_parameter(
        self, body_parameter, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly required array body test: passing null must fail
    client-side before the request is sent.

    :param body_parameter: required list of strings
    :type body_parameter: list[str]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    target_url = self.post_required_array_parameter.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # '[str]' is the msrest notation for a JSON array of strings.
    payload = self._serialize.body(body_parameter, '[str]')
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_required_array_parameter.metadata = {'url': '/reqopt/requied/array/parameter'}
async def post_optional_array_parameter(
        self, body_parameter=None, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly optional array body test: null is a valid payload.

    :param body_parameter: optional list of strings
    :type body_parameter: list[str]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    target_url = self.post_optional_array_parameter.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # A null body is sent as an empty request rather than serialized.
    payload = (
        None if body_parameter is None
        else self._serialize.body(body_parameter, '[str]')
    )
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_optional_array_parameter.metadata = {'url': '/reqopt/optional/array/parameter'}
async def post_required_array_property(
        self, value, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly required array property test: an ArrayWrapper whose
    'value' is null must fail client-side before the request is sent.

    :param value: required wrapped list of strings
    :type value: list[str]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    # Wrap the flat argument into the model the service expects.
    wrapper = models.ArrayWrapper(value=value)
    target_url = self.post_required_array_property.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    payload = self._serialize.body(wrapper, 'ArrayWrapper')
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_required_array_property.metadata = {'url': '/reqopt/requied/array/property'}
async def post_optional_array_property(
        self, value=None, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly optional array property test: an ArrayOptionalWrapper
    with 'value' = null is valid.

    :param value: optional wrapped list of strings
    :type value: list[str]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    # Only build the wrapper model when there is a value to wrap.
    wrapper = models.ArrayOptionalWrapper(value=value) if value is not None else None
    target_url = self.post_optional_array_property.metadata['url']
    query = {}
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    payload = (
        None if wrapper is None
        else self._serialize.body(wrapper, 'ArrayOptionalWrapper')
    )
    http_request = self._client.post(target_url, query, headers, payload)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_optional_array_property.metadata = {'url': '/reqopt/optional/array/property'}
async def post_required_array_header(
        self, header_parameter, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly required array header test: a null 'headerParameter'
    must fail client-side before the request is sent.

    :param header_parameter: required values sent as 'headerParameter'
    :type header_parameter: list[str]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    target_url = self.post_required_array_header.metadata['url']
    query = {}
    headers = {}
    if custom_headers:
        headers.update(custom_headers)
    # Array header values are joined with ',' (csv collection format).
    headers['headerParameter'] = self._serialize.header(
        "header_parameter", header_parameter, '[str]', div=',')
    http_request = self._client.post(target_url, query, headers)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_required_array_header.metadata = {'url': '/reqopt/requied/array/header'}
async def post_optional_array_header(
        self, header_parameter=None, *, custom_headers=None, raw=False, **operation_config):
    """Explicitly optional array header test: the 'headerParameter'
    header may be omitted entirely (null).

    :param header_parameter: optional values sent as 'headerParameter'
    :type header_parameter: list[str]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorException<requiredoptional.models.ErrorException>`
    """
    target_url = self.post_optional_array_header.metadata['url']
    query = {}
    headers = {}
    if custom_headers:
        headers.update(custom_headers)
    # Array header values are joined with ',' (csv collection format).
    if header_parameter is not None:
        headers['headerParameter'] = self._serialize.header(
            "header_parameter", header_parameter, '[str]', div=',')
    http_request = self._client.post(target_url, query, headers)
    response = await self._client.async_send(
        http_request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
post_optional_array_header.metadata = {'url': '/reqopt/optional/array/header'}
| 41.478805 | 136 | 0.672814 |
c3712f5a75516e8395dc82f0ac88f813dcac17e7 | 371 | py | Python | strings/palindrome/python/palindrome.py | avi-pal/al-go-rithms | 5167a20f1db7b366ff19f2962c1746a02e4f5067 | [
"CC0-1.0"
] | 1,253 | 2017-06-06T07:19:25.000Z | 2022-03-30T17:07:58.000Z | strings/palindrome/python/palindrome.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 554 | 2017-09-29T18:56:01.000Z | 2022-02-21T15:48:13.000Z | strings/palindrome/python/palindrome.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 2,226 | 2017-09-29T19:59:59.000Z | 2022-03-25T08:59:55.000Z | #We check if a string is palindrome or not
# Interactive palindrome checker: read a line and report whether it reads
# the same forwards and backwards, ignoring letter case.
text = input("Enter any string:")
# casefold() gives aggressive, Unicode-aware case-insensitive comparison.
text = text.casefold()
# A palindrome equals its own reverse.
if text == text[::-1]:
    print("Congrats! You typed in a PALINDROME!!")
else:
    print("This is not a palindrome. Try Again.")
| 26.5 | 52 | 0.74124 |
265cd8682bfc71be0ec6557326ba18434768965f | 11,694 | py | Python | setup.py | amrali-eg/randomgen | ea45e16d4e8ff701a705f5f5ec3592f656170f39 | [
"NCSA",
"BSD-3-Clause"
] | 73 | 2018-03-28T19:40:23.000Z | 2022-02-06T18:30:17.000Z | setup.py | amrali-eg/randomgen | ea45e16d4e8ff701a705f5f5ec3592f656170f39 | [
"NCSA",
"BSD-3-Clause"
] | 209 | 2018-03-22T05:52:50.000Z | 2022-03-23T02:07:58.000Z | setup.py | amrali-eg/randomgen | ea45e16d4e8ff701a705f5f5ec3592f656170f39 | [
"NCSA",
"BSD-3-Clause"
] | 22 | 2018-05-22T11:21:19.000Z | 2022-02-28T03:27:48.000Z | from setuptools import Distribution, find_packages, setup
from setuptools.extension import Extension
from distutils.version import LooseVersion
import glob
import io
import os
from os.path import exists, join, splitext
import platform
import struct
import sys
from Cython.Build import cythonize
import Cython.Compiler.Options
import numpy as np
import versioneer
try:
from Cython import Tempita as tempita
except ImportError:
try:
import tempita
except ImportError:
raise ImportError("tempita required to install, use pip install tempita")
# Build-time requirements come from requirements.txt; only the numpy pin is
# kept as a runtime (install) requirement.
with open("requirements.txt") as f:
    setup_required = f.read().splitlines()
install_required = [pkg for pkg in setup_required if "numpy" in pkg]

# Opt-in environment switch enabling Cython line tracing for coverage runs.
CYTHON_COVERAGE = os.environ.get("RANDOMGEN_CYTHON_COVERAGE", "0") in (
    "true",
    "1",
    "True",
)
if CYTHON_COVERAGE:
    print(
        "Building with coverage for cython modules, "
        "RANDOMGEN_CYTHON_COVERAGE=" + os.environ["RANDOMGEN_CYTHON_COVERAGE"]
    )

LONG_DESCRIPTION = io.open("README.md", encoding="utf-8").read()
# Produce annotated HTML output for each cythonized module.
Cython.Compiler.Options.annotate = True

# Make a guess as to whether SSE2 is present for now, TODO: Improve
# Heuristic: any uname() field mentioning an x86-family token.
INTEL_LIKE = any(
    [
        val in k.lower()
        for k in platform.uname()
        for val in ("x86", "i686", "i386", "amd64")
    ]
)

machine_processor = platform.machine() + platform.processor()
ARM_LIKE = any([machine_processor.startswith(name) for name in ("arm", "aarch")])
if ARM_LIKE:
    print("Processor appears to be ARM")
USE_SSE2 = INTEL_LIKE
# NOTE(review): this message is printed before the --no-sse2 flag below is
# processed, so it can overstate SSE use when the flag is passed.
print("Building with SSE?: {0}".format(USE_SSE2))
# Custom command-line switch; remove it so setuptools never sees it.
if "--no-sse2" in sys.argv:
    USE_SSE2 = False
    sys.argv.remove("--no-sse2")

# Root of the Python package; C sources live under MOD_DIR/src.
MOD_DIR = "./randomgen"
def src_join(*fname):
    """Return the path of the given components joined under ``MOD_DIR/src``."""
    relative = join(*fname)
    return join(MOD_DIR, "src", relative)
# Debug builds are requested via the RANDOMGEN_DEBUG environment variable.
DEBUG = os.environ.get("RANDOMGEN_DEBUG", False) in (1, "1", "True", "true")
if DEBUG:
    print("Debug build, RANDOMGEN_DEBUG=" + os.environ["RANDOMGEN_DEBUG"])

# Base compiler/linker configuration; platform branches below mutate these
# lists in place, so ordering matters.
EXTRA_INCLUDE_DIRS = [np.get_include()]
EXTRA_LINK_ARGS = [] if os.name == "nt" else []
# libm is only needed (and only exists as a separate library) off Windows.
EXTRA_LIBRARIES = ["m"] if os.name != "nt" else []
# Undef for manylinux
EXTRA_COMPILE_ARGS = (
    ["/Zp16"] if os.name == "nt" else ["-std=c99", "-U__GNUC_GNU_INLINE__"]
)
UNDEF_MACROS = []
if os.name == "nt":
    # MSVC: link-time code generation plus the Windows crypto/API libs.
    EXTRA_LINK_ARGS = ["/LTCG", "/OPT:REF", "Advapi32.lib", "Kernel32.lib"]
    if DEBUG:
        # Disable optimization and emit debug info for debugging builds.
        EXTRA_LINK_ARGS += ["-debug"]
        EXTRA_COMPILE_ARGS += ["-Zi", "/Od"]
        UNDEF_MACROS += ["NDEBUG"]
    if sys.version_info < (3, 0):
        EXTRA_INCLUDE_DIRS += [src_join("common")]
elif DEBUG:
    # GCC/Clang debug flags.
    EXTRA_COMPILE_ARGS += ["-g", "-O0"]
    EXTRA_LINK_ARGS += ["-g"]
    UNDEF_MACROS += ["NDEBUG"]

# NumPy deprecation guard macro; newer Cython can handle the 1.7 API level.
if Cython.__version__ >= LooseVersion("0.29"):
    DEFS = [("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")]
else:
    DEFS = [("NPY_NO_DEPRECATED_API", "0")]
if CYTHON_COVERAGE:
    DEFS += [("CYTHON_TRACE", "1"), ("CYTHON_TRACE_NOGIL", "1")]

# Per-generator macro sets, each starting from a copy of the shared DEFS.
PCG64_DEFS = DEFS[:]
if sys.maxsize < 2 ** 32 or os.name == "nt":
    # Force emulated mode here
    PCG64_DEFS += [("PCG_FORCE_EMULATED_128BIT_MATH", "1")]
DSFMT_DEFS = DEFS[:] + [("DSFMT_MEXP", "19937")]
SFMT_DEFS = DEFS[:] + [("SFMT_MEXP", "19937")]
PHILOX_DEFS = DEFS[:] + [("R123_USE_PHILOX_64BIT", "1")]

# Feature-specific compile args default to the base set and gain ISA flags
# below when SSE2 is assumed present.
RDRAND_COMPILE_ARGS = EXTRA_COMPILE_ARGS[:]
SSSE3_COMPILE_ARGS = EXTRA_COMPILE_ARGS[:]
AES_COMPILE_ARGS = EXTRA_COMPILE_ARGS[:]
if USE_SSE2:
    if os.name == "nt":
        EXTRA_COMPILE_ARGS += ["/wd4146", "/GL"]
        if struct.calcsize("P") < 8:
            # 32-bit MSVC needs the explicit SSE2 arch flag.
            EXTRA_COMPILE_ARGS += ["/arch:SSE2"]
        SSSE3_COMPILE_ARGS = EXTRA_COMPILE_ARGS[:]
    else:
        EXTRA_COMPILE_ARGS += ["-msse2"]
        RDRAND_COMPILE_ARGS = EXTRA_COMPILE_ARGS[:] + ["-mrdrnd"]
        SSSE3_COMPILE_ARGS = EXTRA_COMPILE_ARGS[:] + ["-mssse3"]
        AES_COMPILE_ARGS = EXTRA_COMPILE_ARGS[:] + ["-maes"]
    DSFMT_DEFS += [("HAVE_SSE2", "1")]
    SFMT_DEFS += [("HAVE_SSE2", "1")]

# Render every tempita template (*.in) whose output is stale, normalizing
# line endings and only rewriting files whose content actually changed.
files = glob.glob("./randomgen/*.in") + glob.glob("./randomgen/legacy/*.in")
for templated_file in files:
    output_file_name = splitext(templated_file)[0]
    with open(templated_file, "r") as source_file:
        template = tempita.Template(source_file.read())
        processed = template.substitute().replace("\r\n", "\n")
    contents = ""
    if exists(output_file_name):
        with open(output_file_name, "r") as output_file:
            contents = output_file.read()
    if contents != processed:
        print("Processing {0} to {1}".format(templated_file, output_file_name))
        with open(output_file_name, "w", newline="\n") as output_file:
            output_file.write(processed)
# Build the core (non-bit-generator) extension modules. Each entry maps a
# Cython module name to its extra C sources and macros.
extensions = []
for name in (
    "bounded_integers",
    "common",
    "entropy",
    "generator",
    "legacy.bounded_integers",
    "mtrand",
    "_seed_sequence",
):
    extra_source = []
    extra_macros = []
    extra_incl = []
    # Dotted module names map to nested paths under randomgen/.
    source = ["randomgen/{0}.pyx".format(name.replace(".", "/"))]
    legacy = name in ("legacy.bounded_integers", "mtrand")
    if name in ("bounded_integers", "generator") or legacy:
        # These modules all link against the shared distributions C code.
        extra_source = [src_join("distributions", "distributions.c")]
        if name == "generator":
            extra_source += [
                src_join("distributions", "logfactorial.c"),
                src_join("distributions", "hypergeometric.c"),
            ]
        if legacy:
            # Legacy modules additionally need the legacy distributions and
            # the RANDOMGEN_LEGACY compile-time switch.
            extra_source += [src_join("legacy", "legacy-distributions.c")]
            extra_macros = [("RANDOMGEN_LEGACY", "1")]
    elif name == "entropy":
        extra_source = [src_join("entropy", "entropy.c")]
        extra_incl = [src_join("entropy")]
    ext = Extension(
        "randomgen.{0}".format(name),
        source + extra_source,
        libraries=EXTRA_LIBRARIES,
        include_dirs=EXTRA_INCLUDE_DIRS + extra_incl,
        extra_compile_args=EXTRA_COMPILE_ARGS,
        extra_link_args=EXTRA_LINK_ARGS,
        define_macros=DEFS + extra_macros,
        undef_macros=UNDEF_MACROS,
    )
    extensions.append(ext)

# Shared C sources appended by bit_generator() when requested.
CPU_FEATURES = [src_join("common", "cpu_features.c")]
ALIGNED_MALLOC = [src_join("aligned_malloc", "aligned_malloc.c")]
def bit_generator(
    name,
    c_name=None,
    aligned=False,
    cpu_features=False,
    defs=None,
    compile_args=None,
    extra_source=None,
):
    """Append a Cython extension for a single bit generator to ``extensions``.

    Parameters
    ----------
    name : str
        Module name under ``randomgen`` (also the ``.pyx`` file stem).
    c_name : str, optional
        Directory/stem of the backing C source when it differs from ``name``.
    aligned : bool
        Include the aligned-malloc helper sources.
    cpu_features : bool
        Include the CPU feature-detection helper sources.
    defs : list, optional
        Preprocessor defines; defaults to the module-level ``DEFS``.
    compile_args : list, optional
        Compiler flags; defaults to ``EXTRA_COMPILE_ARGS``.
    extra_source : str, optional
        A single additional C source file (e.g. jump-ahead code).
    """
    if c_name is None:
        c_name = name
    if defs is None:
        defs = DEFS
    if compile_args is None:
        compile_args = EXTRA_COMPILE_ARGS
    sources = ["randomgen/{0}.pyx".format(name), src_join(c_name, c_name + ".c")]
    if cpu_features:
        sources.extend(CPU_FEATURES)
    if aligned:
        sources.extend(ALIGNED_MALLOC)
    if extra_source is not None:
        sources.append(extra_source)
    extensions.append(
        Extension(
            "randomgen.{0}".format(name),
            sources,
            include_dirs=EXTRA_INCLUDE_DIRS,
            libraries=EXTRA_LIBRARIES,
            extra_compile_args=compile_args,
            extra_link_args=EXTRA_LINK_ARGS,
            define_macros=defs,
            undef_macros=UNDEF_MACROS,
        )
    )
# Register each bit generator; the flags mirror what the C sources require
# (SIMD compile args, aligned allocation, CPU-feature detection, jump code).
bit_generator(
    "aes",
    c_name="aesctr",
    cpu_features=True,
    aligned=True,
    compile_args=AES_COMPILE_ARGS,
)
bit_generator(
    "chacha", cpu_features=True, aligned=True, compile_args=SSSE3_COMPILE_ARGS
)
bit_generator(
    "dsfmt",
    aligned=True,
    defs=DSFMT_DEFS,
    extra_source=src_join("dsfmt", "dSFMT-jump.c"),
)
bit_generator("hc128", c_name="hc-128")
bit_generator("jsf")
bit_generator("mt19937", extra_source=src_join("mt19937", "mt19937-jump.c"))
bit_generator("mt64")
bit_generator("pcg32")
# PCG requires special treatment since it contains multiple bit gens
ext = Extension(
    "randomgen.pcg64",
    ["randomgen/pcg64.pyx"]
    + [
        src_join("pcg64", "pcg64-common.c"),
        src_join("pcg64", "pcg64-v2.c"),
        src_join("pcg64", "lcg128mix.c"),
    ],
    libraries=EXTRA_LIBRARIES,
    include_dirs=EXTRA_INCLUDE_DIRS,
    extra_compile_args=EXTRA_COMPILE_ARGS,
    extra_link_args=EXTRA_LINK_ARGS,
    define_macros=DEFS,
    undef_macros=UNDEF_MACROS,
)
extensions.append(ext)
bit_generator("philox", defs=PHILOX_DEFS)
bit_generator("rdrand", cpu_features=True, compile_args=RDRAND_COMPILE_ARGS)
bit_generator(
    "sfmt", aligned=True, defs=SFMT_DEFS, extra_source=src_join("sfmt", "sfmt-jump.c")
)
bit_generator(
    "speck128",
    c_name="speck-128",
    cpu_features=True,
    aligned=True,
    compile_args=SSSE3_COMPILE_ARGS,
)
bit_generator("threefry")
bit_generator("xoroshiro128")
bit_generator("xorshift1024")
bit_generator("xoshiro256")
bit_generator("xoshiro512")
bit_generator("lxm")
bit_generator("sfc")
bit_generator("efiix64")
bit_generator("romu")
# Hand-built helper extensions: the user-facing wrapper and the test shims.
extensions.append(
    Extension(
        "randomgen.wrapper",
        ["randomgen/wrapper.pyx"],
        include_dirs=EXTRA_INCLUDE_DIRS,
        libraries=EXTRA_LIBRARIES,
        extra_compile_args=EXTRA_COMPILE_ARGS,
        extra_link_args=EXTRA_LINK_ARGS,
        define_macros=DEFS,
        undef_macros=UNDEF_MACROS,
    )
)
extensions.append(
    Extension(
        "randomgen.tests._shims",
        ["randomgen/tests/_shims.pyx"],
        include_dirs=EXTRA_INCLUDE_DIRS,
        libraries=EXTRA_LIBRARIES,
        extra_compile_args=EXTRA_COMPILE_ARGS,
        extra_link_args=EXTRA_LINK_ARGS,
        define_macros=DEFS,
        undef_macros=UNDEF_MACROS,
    )
)
# Trove classifiers describing supported platforms and Python versions.
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Environment :: Console",
    "Intended Audience :: End Users/Desktop",
    "Intended Audience :: Financial and Insurance Industry",
    "Intended Audience :: Information Technology",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved",
    "Operating System :: MacOS :: MacOS X",
    "Operating System :: Microsoft :: Windows",
    "Operating System :: POSIX :: Linux",
    "Operating System :: Unix",
    "Programming Language :: C",
    "Programming Language :: Cython",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Topic :: Adaptive Technologies",
    "Topic :: Artistic Software",
    "Topic :: Office/Business :: Financial",
    "Topic :: Scientific/Engineering",
    "Topic :: Security :: Cryptography",
]
class BinaryDistribution(Distribution):
    """Distribution subclass that always reports compiled (non-pure) content,
    so built wheels are tagged as platform-specific."""

    def is_pure(self):
        """Signal that this package ships platform-specific binaries."""
        return False
# Final package metadata; extensions are cythonized here so that coverage and
# debug flags can influence compilation.
setup(
    name="randomgen",
    version=versioneer.get_version(),
    classifiers=classifiers,
    cmdclass=versioneer.get_cmdclass(),
    ext_modules=cythonize(
        extensions,
        compiler_directives={"language_level": "3", "linetrace": CYTHON_COVERAGE},
        # Force a full re-cythonization when tracing or debugging is enabled.
        force=CYTHON_COVERAGE or DEBUG,
        gdb_debug=DEBUG,
    ),
    packages=find_packages(),
    package_dir={"randomgen": "./randomgen"},
    # Ship headers, Cython sources and templates so users can cimport/rebuild.
    package_data={
        "": ["*.h", "*.pxi", "*.pyx", "*.pxd", "*.in"],
        "randomgen.tests.data": ["*.csv"],
    },
    include_package_data=True,
    license="NCSA",
    author="Kevin Sheppard",
    author_email="kevin.k.sheppard@gmail.com",
    distclass=BinaryDistribution,
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    description="Random generator supporting multiple PRNGs",
    url="https://github.com/bashtage/randomgen",
    keywords=[
        "pseudo random numbers",
        "PRNG",
        "RNG",
        "RandomState",
        "random",
        "random numbers",
        "parallel random numbers",
        "PCG",
        "XorShift",
        "dSFMT",
        "MT19937",
        "Random123",
        "ThreeFry",
        "Philox",
        "ChaCha",
        "AES",
        "SPECK",
        "RDRAND",
    ],
    zip_safe=False,
    install_requires=install_required,
    setup_requires=setup_required,
    python_requires=">=3.6",
)
| 29.831633 | 86 | 0.652642 |
657ea0f855705d9bc844177d56bebadcee50ddc1 | 1,438 | py | Python | pyinsteon/groups/on_off.py | pyinsteon/pyinsteon | 1ecc2ed3c2fc4bc333d76d9238ffdcf44e7efdd5 | [
"MIT"
] | 15 | 2020-07-08T05:29:14.000Z | 2022-03-24T18:56:26.000Z | pyinsteon/groups/on_off.py | bshep/pyinsteon | 7025cb6f3beb42f43c6a13799eff0a6b77e38e69 | [
"MIT"
] | 107 | 2019-06-03T09:23:02.000Z | 2022-03-31T23:12:38.000Z | pyinsteon/groups/on_off.py | teharris1/pyinsteon | 9476473676d714a62f0cfcc5124f7cd7e96de98b | [
"MIT"
] | 16 | 2019-01-24T01:09:49.000Z | 2022-02-24T03:48:42.000Z | """On / Off state."""
from ..address import Address
from .group_base import GroupBase
class OnOff(GroupBase):
    """On / Off state."""

    def __init__(
        self, name: str, address: Address, group: int = 0, default: int = None
    ):
        """Init the OnOff class."""
        super().__init__(name, address, group, default, value_type=int)

    # pylint: disable=arguments-differ
    def set_value(self, on_level):
        """Map any truthy on_level to full-on (0xFF), otherwise to 0."""
        self.value = 0 if not on_level else 0xFF
class LowBattery(GroupBase):
    """Low battery state."""

    def __init__(
        self, name: str, address: Address, group: int = 0, default: bool = None
    ):
        """Init the LowBattery class."""
        super().__init__(name, address, group, default, value_type=bool)

    # pylint: disable=arguments-differ
    def set_value(self, low_battery):
        """Record the low-battery flag, coerced to a plain bool."""
        self.value = True if low_battery else False
class Heartbeat(GroupBase):
    """Heartbeat state."""

    def __init__(
        self, name: str, address: Address, group: int = 0, default: bool = None
    ):
        """Init the Heartbeat class."""
        super().__init__(name, address, group, default, value_type=bool)

    # pylint: disable=arguments-differ
    def set_value(self, heartbeat):
        """Record the heartbeat flag, coerced to a plain bool."""
        self.value = True if heartbeat else False
| 29.346939 | 79 | 0.625174 |
40332a55ffdea4d537efb0c75064cec5a3de7a15 | 6,821 | py | Python | admin_reorder/middleware.py | Jragon/django-modeladmin-reorder | 8c50354eed4cdcfbc12c5e4e4c4fca63beca718d | [
"BSD-3-Clause"
] | null | null | null | admin_reorder/middleware.py | Jragon/django-modeladmin-reorder | 8c50354eed4cdcfbc12c5e4e4c4fca63beca718d | [
"BSD-3-Clause"
] | null | null | null | admin_reorder/middleware.py | Jragon/django-modeladmin-reorder | 8c50354eed4cdcfbc12c5e4e4c4fca63beca718d | [
"BSD-3-Clause"
] | 1 | 2022-03-28T15:32:28.000Z | 2022-03-28T15:32:28.000Z | from copy import deepcopy
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import ImproperlyConfigured
from django.urls import resolve, Resolver404
from django.utils.deprecation import MiddlewareMixin
class ModelAdminReorder(MiddlewareMixin):
    """Middleware that reorders and renames apps/models on the Django admin
    index and app_list pages according to the ``ADMIN_REORDER`` setting."""

    # Name of the Django settings attribute holding the reorder configuration.
    settings_variable_name = 'ADMIN_REORDER'

    def init_config(self, request, app_list):
        """Validate the configured order and flatten all admin models.

        Raises ``ImproperlyConfigured`` when the setting is absent/falsy or
        is not a list/tuple.  Populates ``self.models_list`` with every model
        dict, each annotated with a dotted ``model_name`` key.
        """
        self.request = request
        self.app_list = app_list
        self.config = getattr(settings, self.settings_variable_name, None)
        if not self.config:
            # ADMIN_REORDER settings is not defined.
            raise ImproperlyConfigured(f'{self.settings_variable_name} config is not defined.')
        if not isinstance(self.config, (tuple, list)):
            raise ImproperlyConfigured(
                '{name} config parameter must be tuple or list. '
                'Got {config}'.format(name=self.settings_variable_name, config=self.config))
        # admin_index = admin.site.index(request)
        admin_site = self.get_admin_site()
        admin_index = admin_site.index(request)
        try:
            # try to get all installed models
            app_list = admin_index.context_data['app_list']
        except KeyError:
            # use app_list from context if this fails
            pass
        # Flatten all models from apps
        self.models_list = []
        for app in app_list:
            for model in app['models']:
                model['model_name'] = self.get_model_name(
                    app['app_label'], model['object_name'])
                self.models_list.append(model)

    def get_admin_site(self):
        """Return the admin site whose index supplies the model list."""
        return admin.site

    def get_app_list(self):
        """Build the reordered app list from the config entries, skipping
        entries that resolve to nothing."""
        ordered_app_list = []
        for app_config in self.config:
            app = self.make_app(app_config)
            if app:
                ordered_app_list.append(app)
        return ordered_app_list

    def make_app(self, app_config):
        """Turn one config entry (app label string or dict) into an app dict."""
        if not isinstance(app_config, (dict, str)):
            raise TypeError('{name} list item must be '
                            'dict or string. Got {config}'.format(
                                name=self.settings_variable_name, config=repr(app_config)
                            ))
        if isinstance(app_config, str):
            # Keep original label and models
            return self.find_app(app_config)
        else:
            return self.process_app(app_config)

    def find_app(self, app_label):
        """Return the installed app dict for ``app_label`` (None if absent)."""
        for app in self.app_list:
            if app['app_label'] == app_label:
                return app

    def get_model_name(self, app_name, model_name):
        """Return ``model_name`` qualified as ``app.Model`` if not already."""
        if '.' not in model_name:
            model_name = '%s.%s' % (app_name, model_name)
        return model_name

    def process_app(self, app_config):
        """Apply a dict config entry: optional rename and model reordering.

        Returns a deep copy of the app so the original admin data is not
        mutated; returns None when no configured model matched.
        """
        if 'app' not in app_config:
            raise NameError('{name} list item must define '
                            'a "app" name. Got {config}'.format(
                                name=self.settings_variable_name,
                                config=repr(app_config)
                            ))
        app = self.find_app(app_config['app'])
        if app:
            app = deepcopy(app)
            # Rename app
            if 'label' in app_config:
                app['name'] = app_config['label']
            # Process app models
            if 'models' in app_config:
                models_config = app_config.get('models')
                models = self.process_models(models_config)
                if models:
                    app['models'] = models
                else:
                    return None
            return app

    def process_models(self, models_config):
        """Resolve each configured model (name string or rename dict)."""
        if not isinstance(models_config, (dict, list, tuple)):
            raise TypeError('"models" config for {name} list '
                            'item must be dict or list/tuple. '
                            'Got {config}'.format(
                                name=self.settings_variable_name,
                                config=repr(models_config)
                            ))
        ordered_models_list = []
        for model_config in models_config:
            model = None
            if isinstance(model_config, dict):
                model = self.process_model(model_config)
            else:
                model = self.find_model(model_config)
            if model:
                ordered_models_list.append(model)
        return ordered_models_list

    def find_model(self, model_name):
        """Return the flattened model dict matching ``model_name`` (or None)."""
        for model in self.models_list:
            if model['model_name'] == model_name:
                return model

    def process_model(self, model_config):
        # Process model defined as { model: 'model', 'label': 'label' }
        for key in ('model', 'label', ):
            if key not in model_config:
                return
        model = self.find_model(model_config['model'])
        if model:
            model['name'] = model_config['label']
            return model

    def get_admin_site_url_names(self):
        """
        List of admin site url_names where to apply middleware logic
        """
        return ['index', 'app_list']

    def process_template_response(self, request, response):
        """Reorder the app list on admin index/app_list template responses."""
        try:
            url = resolve(request.path_info)
        except Resolver404:
            return response
        # NOTE(review): due to operator precedence this reads as
        # (not app_name == 'admin') AND (url_name not in names).  Confirm the
        # intent was not `not (app_name == 'admin' and url_name in names)`.
        if not url.app_name == 'admin' and \
                url.url_name not in self.get_admin_site_url_names():
            # current view is not a django admin index
            # or app_list view, bail out!
            return response
        if 'app_list' in response.context_data:
            app_list = response.context_data['app_list']
            context_key = 'app_list'
        elif 'available_apps' in response.context_data:
            app_list = response.context_data['available_apps']
            context_key = 'available_apps'
        else:
            # there is no app_list! nothing to reorder
            return response
        self.init_config(request, app_list)
        ordered_app_list = self.get_app_list()
        response.context_data[context_key] = ordered_app_list
        return response
class ModelAdminReorderMiddlewareMixin(ModelAdminReorder):
    """
    If you have multiple admin, then you can:
    1) define your own middleware class inherited from ModelAdminReorderMiddlewareMixin
    2) set settings_variable_name attribute
    3) overwrite get_admin_site() method to return another admin.site
    4) overwrite get_admin_site_url_names() method with appending your custom urls names
    """
    # Example overrides (illustrative only):
    # def get_admin_site(self):
    #     return short_admin_site
    # def get_admin_site_url_names(self):
    #     names = super().get_admin_site_url_names()
    #     names.append('short_admin_index')
    #     return names
b4eafcf32b77878141e2b7408646db8d9940f8e3 | 13,701 | py | Python | autoprogram/sphinxcontrib/autoprogram.py | djoshea/sphinxcontrib-matlabdomain | f1ae1e6defc206676d4490665c1cfcc4796b9df0 | [
"BSD-2-Clause"
] | 2 | 2017-04-25T17:00:35.000Z | 2021-02-14T03:57:16.000Z | autoprogram/sphinxcontrib/autoprogram.py | djoshea/sphinxcontrib-matlabdomain | f1ae1e6defc206676d4490665c1cfcc4796b9df0 | [
"BSD-2-Clause"
] | null | null | null | autoprogram/sphinxcontrib/autoprogram.py | djoshea/sphinxcontrib-matlabdomain | f1ae1e6defc206676d4490665c1cfcc4796b9df0 | [
"BSD-2-Clause"
] | null | null | null | """
sphinxcontrib.autoprogram
~~~~~~~~~~~~~~~~~~~~~~~~~
Documenting CLI programs.
:copyright: Copyright 2014 by Hong Minhee
:license: BSD, see LICENSE for details.
"""
# pylint: disable=protected-access,missing-docstring
import argparse
import collections
try:
import builtins
except ImportError:
import __builtin__ as builtins
import functools
import os
import re
import six
import textwrap
import unittest
from docutils import nodes
from docutils.parsers.rst.directives import unchanged
from docutils.statemachine import ViewList
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.domains import std
__all__ = ('BOOLEAN_OPTIONS', 'AutoprogramDirective', 'ScannerTestCase',
'import_object', 'scan_programs', 'setup', 'suite')
def get_subparser_action(parser):
    """Return *parser*'s ``_SubParsersAction``, or ``None`` if it has none.

    The subparsers action is usually registered last, so the final action is
    checked first as a fast path before scanning the full action list.
    """
    last = parser._actions[-1]
    if isinstance(last, argparse._SubParsersAction):
        return last
    return next(
        (act for act in parser._actions
         if isinstance(act, argparse._SubParsersAction)),
        None,
    )
def scan_programs(parser, command=None, maxdepth=0, depth=0):
    """Recursively scan *parser* and its subcommands.

    Yields ``(command, options, parser)`` triples, where ``command`` is the
    list of subcommand names leading to ``parser`` and ``options`` is a list
    of ``(names, description)`` pairs for its arguments.

    :param parser: the :class:`argparse.ArgumentParser` to scan
    :param command: subcommand path accumulated so far; ``None`` means the
                    top level (kept backward-compatible with the old ``[]``)
    :param maxdepth: stop recursing at this depth; ``0`` means unlimited
    :param depth: current recursion depth (internal)
    """
    # Fix: use a None sentinel instead of a mutable default list so state is
    # never shared between calls.
    if command is None:
        command = []
    if maxdepth and depth >= maxdepth:
        return
    options = []
    # Positional arguments first: they have no option strings.
    for arg in parser._actions:
        if not (arg.option_strings or
                isinstance(arg, argparse._SubParsersAction)):
            name = (arg.metavar or arg.dest).lower()
            desc = (arg.help or '') % {'default': arg.default}
            options.append(([name], desc))
    # Then optional arguments, rendering metavars/choices into the names.
    for arg in parser._actions:
        if arg.option_strings:
            if isinstance(arg, (argparse._StoreAction,
                                argparse._AppendAction)):
                if arg.choices is None:
                    metavar = arg.metavar or arg.dest
                    if isinstance(metavar, tuple):
                        names = [
                            '{0} <{1}>'.format(
                                option_string, '> <'.join(metavar).lower()
                            )
                            for option_string in arg.option_strings
                        ]
                    else:
                        names = [
                            '{0} <{1}>'.format(option_string, metavar.lower())
                            for option_string in arg.option_strings
                        ]
                else:
                    choices = '{0}'.format(','.join(arg.choices))
                    names = ['{0} {{{1}}}'.format(option_string, choices)
                             for option_string in arg.option_strings]
            else:
                names = list(arg.option_strings)
            desc = (arg.help or '') % {'default': arg.default}
            options.append((names, desc))
    yield command, options, parser
    if parser._subparsers:
        choices = ()
        subp_action = get_subparser_action(parser)
        if subp_action:
            choices = subp_action.choices.items()
        # Preserve declaration order when the parser stores an OrderedDict;
        # otherwise sort subcommands by name for deterministic output.
        if not (hasattr(collections, 'OrderedDict') and
                isinstance(choices, collections.OrderedDict)):
            choices = sorted(choices, key=lambda pair: pair[0])
        for cmd, sub in choices:
            if isinstance(sub, argparse.ArgumentParser):
                for program in scan_programs(
                    sub, command + [cmd], maxdepth, depth + 1
                ):
                    yield program
def import_object(import_name):
    """Import and evaluate ``'module.path:expression'``; return the result.

    The text before the colon is imported as a module (falling back to
    loading a same-named extensionless script found on ``sys.path``); the
    text after the colon is evaluated with the module namespace as locals.
    """
    module_name, expr = import_name.split(':', 1)
    try:
        mod = __import__(module_name)
    except ImportError:
        # This happens if the file is a script with no .py extension. Here we
        # trick autoprogram to load a module in memory with the contents of
        # the script, if there is a script named module_name. Otherwise, raise
        # an ImportError as it did before.
        import glob
        import sys
        import os
        import imp
        for p in sys.path:
            f = glob.glob(os.path.join(p, module_name))
            if len(f) > 0:
                with open(f[0]) as fobj:
                    codestring = fobj.read()
                foo = imp.new_module("foo")
                six.exec_(codestring, foo.__dict__)
                sys.modules["foo"] = foo
                mod = __import__("foo")
                break
        else:
            raise ImportError("No module named {}".format(module_name))
    # __import__ returns the top-level package; walk down to the leaf module.
    reduce_ = getattr(functools, 'reduce', None) or reduce
    mod = reduce_(getattr, module_name.split('.')[1:], mod)
    globals_ = builtins
    if not isinstance(globals_, dict):
        globals_ = globals_.__dict__
    # Evaluate the expression with builtins as globals and the module
    # namespace as locals.
    return eval(expr, globals_, mod.__dict__)
class AutoprogramDirective(Directive):
    """reST directive ``.. autoprogram:: module:parser_expr`` that renders an
    :mod:`argparse` parser (and its subcommands) as documentation."""

    has_content = False
    required_arguments = 1
    option_spec = {
        'prog': unchanged,
        'maxdepth': unchanged,
        'start_command': unchanged,
        'strip_usage': unchanged,
        'no_usage_codeblock': unchanged,
    }

    def make_rst(self):
        """Yield reST source lines describing the parser and its options."""
        import_name, = self.arguments
        parser = import_object(import_name or '__undefined__')
        parser.prog = self.options.get('prog', parser.prog)
        start_command = self.options.get('start_command', '').split(' ')
        strip_usage = 'strip_usage' in self.options
        usage_codeblock = 'no_usage_codeblock' not in self.options
        if start_command[0] == '':
            start_command.pop(0)
        if start_command:
            def get_start_cmd_parser(p):
                # Walk the subparser tree down to the requested command.
                looking_for = start_command.pop(0)
                action = get_subparser_action(p)
                if not action:
                    raise ValueError('No actions for command ' + looking_for)
                subp = action.choices[looking_for]
                if start_command:
                    return get_start_cmd_parser(subp)
                return subp
            parser = get_start_cmd_parser(parser)
        for commands, options, cmd_parser in scan_programs(
            parser, maxdepth=int(self.options.get('maxdepth', 0))
        ):
            title = cmd_parser.prog.rstrip()
            usage = cmd_parser.format_usage()
            if strip_usage:
                # Replace the leading program path with "..." and dedent the
                # wrapped usage lines accordingly.
                to_strip = title.rsplit(' ', 1)[0]
                len_to_strip = len(to_strip) - 4
                usage_lines = usage.splitlines()
                usage = os.linesep.join([
                    usage_lines[0].replace(to_strip, '...'),
                ] + [
                    l[len_to_strip:] for l in usage_lines[1:]
                ])
            yield ''
            yield '.. program:: ' + title
            yield ''
            yield title
            # Subcommands get '!' underlines, the root program gets '?'.
            yield ('!' if commands else '?') * len(title)
            yield ''
            yield cmd_parser.description or ''
            yield ''
            if usage_codeblock:
                yield '.. code-block:: console'
                yield ''
                yield textwrap.indent(usage, ' ')
            else:
                yield usage
            yield ''
            for option_strings, help_ in options:
                yield '.. option:: {0}'.format(', '.join(option_strings))
                yield ''
                yield ' ' + help_.replace('\n', ' \n')
                yield ''
            yield ''
            for line in (cmd_parser.epilog or '').splitlines():
                yield line or ''

    def run(self):
        """Parse the generated reST into docutils nodes and return them."""
        node = nodes.section()
        node.document = self.state.document
        result = ViewList()
        for line in self.make_rst():
            result.append(line, '<autoprogram>')
        nested_parse_with_titles(self.state, result, node)
        return node.children
def patch_option_role_to_allow_argument_form():
    """Before Sphinx 1.2.2, :rst:dir:`.. option::` directive hadn't
    allowed to not start with a dash or slash, so it hadn't been possible
    to represent positional arguments (not options).

    https://bitbucket.org/birkenfeld/sphinx/issue/1357/

    It monkeypatches the :rst:dir:`.. option::` directive's behavior.
    """
    # Make the leading "/", "-", or "--" optional so plain argument names match.
    std.option_desc_re = re.compile(r'((?:/|-|--)?[-_a-zA-Z0-9]+)(\s*.*)')
def setup(app):
    """Sphinx extension entry point: register the directive and apply patches."""
    app.add_directive('autoprogram', AutoprogramDirective)
    patch_option_role_to_allow_argument_form()
class ScannerTestCase(unittest.TestCase):
    """Tests for :func:`scan_programs`."""

    def test_simple_parser(self):
        """A flat parser yields one program with all options rendered."""
        parser = argparse.ArgumentParser(description='Process some integers.')
        parser.add_argument('integers', metavar='N', type=int, nargs='*',
                            help='an integer for the accumulator')
        parser.add_argument('-i', '--identity', type=int, default=0,
                            help='the default result for no arguments '
                            '(default: 0)')
        parser.add_argument('--sum', dest='accumulate', action='store_const',
                            const=sum, default=max,
                            help='sum the integers (default: find the max)')
        parser.add_argument('--key-value', metavar=('KEY', 'VALUE'), nargs=2)
        programs = scan_programs(parser)
        programs = list(programs)
        self.assertEqual(1, len(programs))
        parser_info, = programs
        program, options, cmd_parser = parser_info
        self.assertEqual([], program)
        self.assertEqual('Process some integers.', cmd_parser.description)
        self.assertEqual(5, len(options))
        self.assertEqual(
            (['n'], 'an integer for the accumulator'),
            options[0]
        )
        self.assertEqual(
            (['-h', '--help'], 'show this help message and exit'),
            options[1]
        )
        self.assertEqual(
            (['-i <identity>', '--identity <identity>'],
             'the default result for no arguments (default: 0)'),
            options[2]
        )
        self.assertEqual(
            (['--sum'], 'sum the integers (default: find the max)'),
            options[3]
        )
        self.assertEqual(
            (['--key-value <key> <value>', ], ''),
            options[4]
        )

    def test_subcommands(self):
        """A parser with subcommands yields the root plus one per subcommand."""
        parser = argparse.ArgumentParser(description='Process some integers.')
        subparsers = parser.add_subparsers()
        max_parser = subparsers.add_parser('max', description='Find the max.')
        max_parser.set_defaults(accumulate=max)
        max_parser.add_argument('integers', metavar='N', type=int, nargs='+',
                                help='An integer for the accumulator.')
        sum_parser = subparsers.add_parser('sum',
                                           description='Sum the integers.')
        sum_parser.set_defaults(accumulate=sum)
        sum_parser.add_argument('integers', metavar='N', type=int, nargs='+',
                                help='An integer for the accumulator.')
        programs = scan_programs(parser)
        programs = list(programs)
        self.assertEqual(3, len(programs))
        # main
        program, options, cmd_parser = programs[0]
        self.assertEqual([], program)
        self.assertEqual('Process some integers.', cmd_parser.description)
        self.assertEqual(1, len(options))
        self.assertEqual(
            (['-h', '--help'],
             'show this help message and exit'),
            options[0]
        )
        # max
        program, options, cmd_parser = programs[1]
        self.assertEqual(['max'], program)
        self.assertEqual('Find the max.', cmd_parser.description)
        self.assertEqual(2, len(options))
        self.assertEqual((['n'], 'An integer for the accumulator.'),
                         options[0])
        self.assertEqual(
            (['-h', '--help'],
             'show this help message and exit'),
            options[1]
        )
        # sum
        program, options, cmd_parser = programs[2]
        self.assertEqual(['sum'], program)
        self.assertEqual('Sum the integers.', cmd_parser.description)
        self.assertEqual(2, len(options))
        self.assertEqual((['n'], 'An integer for the accumulator.'),
                         options[0])

    def test_choices(self):
        """Options with choices render as ``--opt {a,b}``."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--awesomeness", choices=["meh", "awesome"])
        program, options, cmd_parser = list(scan_programs(parser))[0]
        log_option = options[1]
        self.assertEqual((["--awesomeness {meh,awesome}"], ''), log_option)

    def test_parse_epilog(self):
        """The parser's epilog is passed through on the yielded parser."""
        parser = argparse.ArgumentParser(
            description='Process some integers.',
            epilog='The integers will be processed.'
        )
        programs = scan_programs(parser)
        programs = list(programs)
        self.assertEqual(1, len(programs))
        parser_data, = programs
        program, options, cmd_parser = parser_data
        self.assertEqual('The integers will be processed.', cmd_parser.epilog)
class UtilTestCase(unittest.TestCase):
    """Tests for :func:`import_object`."""

    def test_import_object(self):
        cls = import_object('sphinxcontrib.autoprogram:UtilTestCase')
        self.assertTrue(cls is UtilTestCase)
        instance = import_object(
            'sphinxcontrib.autoprogram:UtilTestCase("test_import_object")'
        )
        self.assertIsInstance(instance, UtilTestCase)

    if not hasattr(unittest.TestCase, 'assertIsInstance'):
        # Compatibility shim: very old unittest lacks assertIsInstance.
        def assertIsInstance(self, instance, cls):
            self.assertTrue(isinstance(instance, cls),
                            '{0!r} is not an instance of {1.__module__}.'
                            '{1.__name__}'.format(instance, cls))
# Aggregate test suite exposed for ``setup.py test`` style runners.
suite = unittest.TestSuite()
suite.addTests(
    unittest.defaultTestLoader.loadTestsFromTestCase(ScannerTestCase)
)
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(UtilTestCase))
a3fd17760707e8546ab15e429eda2b24d4277739 | 29,679 | py | Python | monitoring/google/cloud/monitoring_v3/proto/common_pb2.py | jingtaow/google-cloud-python | 409a425e174c66215fc2bc1575f8c6364be1df40 | [
"Apache-2.0"
] | null | null | null | monitoring/google/cloud/monitoring_v3/proto/common_pb2.py | jingtaow/google-cloud-python | 409a425e174c66215fc2bc1575f8c6364be1df40 | [
"Apache-2.0"
] | null | null | null | monitoring/google/cloud/monitoring_v3/proto/common_pb2.py | jingtaow/google-cloud-python | 409a425e174c66215fc2bc1575f8c6364be1df40 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/monitoring_v3/proto/common.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import distribution_pb2 as google_dot_api_dot_distribution__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/monitoring_v3/proto/common.proto",
package="google.monitoring.v3",
syntax="proto3",
serialized_options=_b(
"\n\030com.google.monitoring.v3B\013CommonProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3"
),
serialized_pb=_b(
'\n-google/cloud/monitoring_v3/proto/common.proto\x12\x14google.monitoring.v3\x1a\x1dgoogle/api/distribution.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xaa\x01\n\nTypedValue\x12\x14\n\nbool_value\x18\x01 \x01(\x08H\x00\x12\x15\n\x0bint64_value\x18\x02 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x03 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x04 \x01(\tH\x00\x12\x36\n\x12\x64istribution_value\x18\x05 \x01(\x0b\x32\x18.google.api.DistributionH\x00\x42\x07\n\x05value"l\n\x0cTimeInterval\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xad\x07\n\x0b\x41ggregation\x12\x33\n\x10\x61lignment_period\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x45\n\x12per_series_aligner\x18\x02 \x01(\x0e\x32).google.monitoring.v3.Aggregation.Aligner\x12G\n\x14\x63ross_series_reducer\x18\x04 \x01(\x0e\x32).google.monitoring.v3.Aggregation.Reducer\x12\x17\n\x0fgroup_by_fields\x18\x05 
\x03(\t"\x8b\x03\n\x07\x41ligner\x12\x0e\n\nALIGN_NONE\x10\x00\x12\x0f\n\x0b\x41LIGN_DELTA\x10\x01\x12\x0e\n\nALIGN_RATE\x10\x02\x12\x15\n\x11\x41LIGN_INTERPOLATE\x10\x03\x12\x14\n\x10\x41LIGN_NEXT_OLDER\x10\x04\x12\r\n\tALIGN_MIN\x10\n\x12\r\n\tALIGN_MAX\x10\x0b\x12\x0e\n\nALIGN_MEAN\x10\x0c\x12\x0f\n\x0b\x41LIGN_COUNT\x10\r\x12\r\n\tALIGN_SUM\x10\x0e\x12\x10\n\x0c\x41LIGN_STDDEV\x10\x0f\x12\x14\n\x10\x41LIGN_COUNT_TRUE\x10\x10\x12\x15\n\x11\x41LIGN_COUNT_FALSE\x10\x18\x12\x17\n\x13\x41LIGN_FRACTION_TRUE\x10\x11\x12\x17\n\x13\x41LIGN_PERCENTILE_99\x10\x12\x12\x17\n\x13\x41LIGN_PERCENTILE_95\x10\x13\x12\x17\n\x13\x41LIGN_PERCENTILE_50\x10\x14\x12\x17\n\x13\x41LIGN_PERCENTILE_05\x10\x15\x12\x18\n\x14\x41LIGN_PERCENT_CHANGE\x10\x17"\xb1\x02\n\x07Reducer\x12\x0f\n\x0bREDUCE_NONE\x10\x00\x12\x0f\n\x0bREDUCE_MEAN\x10\x01\x12\x0e\n\nREDUCE_MIN\x10\x02\x12\x0e\n\nREDUCE_MAX\x10\x03\x12\x0e\n\nREDUCE_SUM\x10\x04\x12\x11\n\rREDUCE_STDDEV\x10\x05\x12\x10\n\x0cREDUCE_COUNT\x10\x06\x12\x15\n\x11REDUCE_COUNT_TRUE\x10\x07\x12\x16\n\x12REDUCE_COUNT_FALSE\x10\x0f\x12\x18\n\x14REDUCE_FRACTION_TRUE\x10\x08\x12\x18\n\x14REDUCE_PERCENTILE_99\x10\t\x12\x18\n\x14REDUCE_PERCENTILE_95\x10\n\x12\x18\n\x14REDUCE_PERCENTILE_50\x10\x0b\x12\x18\n\x14REDUCE_PERCENTILE_05\x10\x0c*\x9e\x01\n\x0e\x43omparisonType\x12\x1a\n\x16\x43OMPARISON_UNSPECIFIED\x10\x00\x12\x11\n\rCOMPARISON_GT\x10\x01\x12\x11\n\rCOMPARISON_GE\x10\x02\x12\x11\n\rCOMPARISON_LT\x10\x03\x12\x11\n\rCOMPARISON_LE\x10\x04\x12\x11\n\rCOMPARISON_EQ\x10\x05\x12\x11\n\rCOMPARISON_NE\x10\x06*a\n\x0bServiceTier\x12\x1c\n\x18SERVICE_TIER_UNSPECIFIED\x10\x00\x12\x16\n\x12SERVICE_TIER_BASIC\x10\x01\x12\x18\n\x14SERVICE_TIER_PREMIUM\x10\x02\x1a\x02\x18\x01\x42\xa3\x01\n\x18\x63om.google.monitoring.v3B\x0b\x43ommonProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3b\x06proto3'
),
dependencies=[
google_dot_api_dot_distribution__pb2.DESCRIPTOR,
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_COMPARISONTYPE = _descriptor.EnumDescriptor(
name="ComparisonType",
full_name="google.monitoring.v3.ComparisonType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="COMPARISON_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="COMPARISON_GT", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="COMPARISON_GE", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="COMPARISON_LT", index=3, number=3, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="COMPARISON_LE", index=4, number=4, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="COMPARISON_EQ", index=5, number=5, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="COMPARISON_NE", index=6, number=6, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=1395,
serialized_end=1553,
)
_sym_db.RegisterEnumDescriptor(_COMPARISONTYPE)
ComparisonType = enum_type_wrapper.EnumTypeWrapper(_COMPARISONTYPE)
_SERVICETIER = _descriptor.EnumDescriptor(
name="ServiceTier",
full_name="google.monitoring.v3.ServiceTier",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="SERVICE_TIER_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="SERVICE_TIER_BASIC",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="SERVICE_TIER_PREMIUM",
index=2,
number=2,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=_b("\030\001"),
serialized_start=1555,
serialized_end=1652,
)
_sym_db.RegisterEnumDescriptor(_SERVICETIER)
ServiceTier = enum_type_wrapper.EnumTypeWrapper(_SERVICETIER)
COMPARISON_UNSPECIFIED = 0
COMPARISON_GT = 1
COMPARISON_GE = 2
COMPARISON_LT = 3
COMPARISON_LE = 4
COMPARISON_EQ = 5
COMPARISON_NE = 6
SERVICE_TIER_UNSPECIFIED = 0
SERVICE_TIER_BASIC = 1
SERVICE_TIER_PREMIUM = 2
_AGGREGATION_ALIGNER = _descriptor.EnumDescriptor(
name="Aligner",
full_name="google.monitoring.v3.Aggregation.Aligner",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="ALIGN_NONE", index=0, number=0, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ALIGN_DELTA", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ALIGN_RATE", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ALIGN_INTERPOLATE",
index=3,
number=3,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGN_NEXT_OLDER",
index=4,
number=4,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGN_MIN", index=5, number=10, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ALIGN_MAX", index=6, number=11, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ALIGN_MEAN", index=7, number=12, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ALIGN_COUNT", index=8, number=13, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ALIGN_SUM", index=9, number=14, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ALIGN_STDDEV", index=10, number=15, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ALIGN_COUNT_TRUE",
index=11,
number=16,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGN_COUNT_FALSE",
index=12,
number=24,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGN_FRACTION_TRUE",
index=13,
number=17,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGN_PERCENTILE_99",
index=14,
number=18,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGN_PERCENTILE_95",
index=15,
number=19,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGN_PERCENTILE_50",
index=16,
number=20,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGN_PERCENTILE_05",
index=17,
number=21,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ALIGN_PERCENT_CHANGE",
index=18,
number=23,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=689,
serialized_end=1084,
)
_sym_db.RegisterEnumDescriptor(_AGGREGATION_ALIGNER)
_AGGREGATION_REDUCER = _descriptor.EnumDescriptor(
name="Reducer",
full_name="google.monitoring.v3.Aggregation.Reducer",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="REDUCE_NONE", index=0, number=0, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REDUCE_MEAN", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REDUCE_MIN", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REDUCE_MAX", index=3, number=3, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REDUCE_SUM", index=4, number=4, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REDUCE_STDDEV", index=5, number=5, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REDUCE_COUNT", index=6, number=6, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REDUCE_COUNT_TRUE",
index=7,
number=7,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="REDUCE_COUNT_FALSE",
index=8,
number=15,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="REDUCE_FRACTION_TRUE",
index=9,
number=8,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="REDUCE_PERCENTILE_99",
index=10,
number=9,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="REDUCE_PERCENTILE_95",
index=11,
number=10,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="REDUCE_PERCENTILE_50",
index=12,
number=11,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="REDUCE_PERCENTILE_05",
index=13,
number=12,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=1087,
serialized_end=1392,
)
_sym_db.RegisterEnumDescriptor(_AGGREGATION_REDUCER)
_TYPEDVALUE = _descriptor.Descriptor(
name="TypedValue",
full_name="google.monitoring.v3.TypedValue",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="bool_value",
full_name="google.monitoring.v3.TypedValue.bool_value",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="int64_value",
full_name="google.monitoring.v3.TypedValue.int64_value",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="double_value",
full_name="google.monitoring.v3.TypedValue.double_value",
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="string_value",
full_name="google.monitoring.v3.TypedValue.string_value",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="distribution_value",
full_name="google.monitoring.v3.TypedValue.distribution_value",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="value",
full_name="google.monitoring.v3.TypedValue.value",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=168,
serialized_end=338,
)
_TIMEINTERVAL = _descriptor.Descriptor(
name="TimeInterval",
full_name="google.monitoring.v3.TimeInterval",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.monitoring.v3.TimeInterval.end_time",
index=0,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.monitoring.v3.TimeInterval.start_time",
index=1,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=340,
serialized_end=448,
)
_AGGREGATION = _descriptor.Descriptor(
name="Aggregation",
full_name="google.monitoring.v3.Aggregation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="alignment_period",
full_name="google.monitoring.v3.Aggregation.alignment_period",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="per_series_aligner",
full_name="google.monitoring.v3.Aggregation.per_series_aligner",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cross_series_reducer",
full_name="google.monitoring.v3.Aggregation.cross_series_reducer",
index=2,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="group_by_fields",
full_name="google.monitoring.v3.Aggregation.group_by_fields",
index=3,
number=5,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_AGGREGATION_ALIGNER, _AGGREGATION_REDUCER],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=451,
serialized_end=1392,
)
_TYPEDVALUE.fields_by_name[
"distribution_value"
].message_type = google_dot_api_dot_distribution__pb2._DISTRIBUTION
_TYPEDVALUE.oneofs_by_name["value"].fields.append(
_TYPEDVALUE.fields_by_name["bool_value"]
)
_TYPEDVALUE.fields_by_name["bool_value"].containing_oneof = _TYPEDVALUE.oneofs_by_name[
"value"
]
_TYPEDVALUE.oneofs_by_name["value"].fields.append(
_TYPEDVALUE.fields_by_name["int64_value"]
)
_TYPEDVALUE.fields_by_name["int64_value"].containing_oneof = _TYPEDVALUE.oneofs_by_name[
"value"
]
_TYPEDVALUE.oneofs_by_name["value"].fields.append(
_TYPEDVALUE.fields_by_name["double_value"]
)
_TYPEDVALUE.fields_by_name[
"double_value"
].containing_oneof = _TYPEDVALUE.oneofs_by_name["value"]
_TYPEDVALUE.oneofs_by_name["value"].fields.append(
_TYPEDVALUE.fields_by_name["string_value"]
)
_TYPEDVALUE.fields_by_name[
"string_value"
].containing_oneof = _TYPEDVALUE.oneofs_by_name["value"]
_TYPEDVALUE.oneofs_by_name["value"].fields.append(
_TYPEDVALUE.fields_by_name["distribution_value"]
)
_TYPEDVALUE.fields_by_name[
"distribution_value"
].containing_oneof = _TYPEDVALUE.oneofs_by_name["value"]
_TIMEINTERVAL.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TIMEINTERVAL.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_AGGREGATION.fields_by_name[
"alignment_period"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_AGGREGATION.fields_by_name["per_series_aligner"].enum_type = _AGGREGATION_ALIGNER
_AGGREGATION.fields_by_name["cross_series_reducer"].enum_type = _AGGREGATION_REDUCER
_AGGREGATION_ALIGNER.containing_type = _AGGREGATION
_AGGREGATION_REDUCER.containing_type = _AGGREGATION
DESCRIPTOR.message_types_by_name["TypedValue"] = _TYPEDVALUE
DESCRIPTOR.message_types_by_name["TimeInterval"] = _TIMEINTERVAL
DESCRIPTOR.message_types_by_name["Aggregation"] = _AGGREGATION
DESCRIPTOR.enum_types_by_name["ComparisonType"] = _COMPARISONTYPE
DESCRIPTOR.enum_types_by_name["ServiceTier"] = _SERVICETIER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TypedValue = _reflection.GeneratedProtocolMessageType(
"TypedValue",
(_message.Message,),
dict(
DESCRIPTOR=_TYPEDVALUE,
__module__="google.cloud.monitoring_v3.proto.common_pb2",
__doc__="""A single strongly-typed value.
Attributes:
value:
The typed value field.
bool_value:
A Boolean value: ``true`` or ``false``.
int64_value:
A 64-bit integer. Its range is approximately ±9.2x1018.
double_value:
A 64-bit double-precision floating-point number. Its magnitude
is approximately ±10±300 and it has 16 significant digits of
precision.
string_value:
A variable-length string value.
distribution_value:
A distribution value.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.TypedValue)
),
)
_sym_db.RegisterMessage(TypedValue)
TimeInterval = _reflection.GeneratedProtocolMessageType(
"TimeInterval",
(_message.Message,),
dict(
DESCRIPTOR=_TIMEINTERVAL,
__module__="google.cloud.monitoring_v3.proto.common_pb2",
__doc__="""A closed time interval. It extends from the start time to the end time,
and includes both: ``[startTime, endTime]``. Valid time intervals depend
on the
```MetricKind`` </monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind>`__
of the metric value. In no case can the end time be earlier than the
start time.
- For a ``GAUGE`` metric, the ``startTime`` value is technically
optional; if no value is specified, the start time defaults to the
value of the end time, and the interval represents a single point in
time. Such an interval is valid only for ``GAUGE`` metrics, which are
point-in-time measurements.
- For ``DELTA`` and ``CUMULATIVE`` metrics, the start time must be
earlier than the end time.
- In all cases, the start time of the next interval must be at least a
microsecond after the end time of the previous interval. Because the
interval is closed, if the start time of a new interval is the same
as the end time of the previous interval, data written at the new
start time could overwrite data written at the previous end time.
Attributes:
end_time:
Required. The end of the time interval.
start_time:
Optional. The beginning of the time interval. The default
value for the start time is the end time. The start time must
not be later than the end time.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.TimeInterval)
),
)
_sym_db.RegisterMessage(TimeInterval)
Aggregation = _reflection.GeneratedProtocolMessageType(
"Aggregation",
(_message.Message,),
dict(
DESCRIPTOR=_AGGREGATION,
__module__="google.cloud.monitoring_v3.proto.common_pb2",
__doc__="""Describes how to combine multiple time series to provide different views
of the data. Aggregation consists of an alignment step on individual
time series (``alignment_period`` and ``per_series_aligner``) followed
by an optional reduction step of the data across the aligned time series
(``cross_series_reducer`` and ``group_by_fields``). For more details,
see `Aggregation </monitoring/api/learn_more#aggregation>`__.
Attributes:
alignment_period:
The alignment period for per-[time
series][google.monitoring.v3.TimeSeries] alignment. If
present, ``alignmentPeriod`` must be at least 60 seconds.
After per-time series alignment, each time series will contain
data points only on the period boundaries. If
``perSeriesAligner`` is not specified or equals
``ALIGN_NONE``, then this field is ignored. If
``perSeriesAligner`` is specified and does not equal
``ALIGN_NONE``, then this field must be defined; otherwise an
error is returned.
per_series_aligner:
The approach to be used to align individual time series. Not
all alignment functions may be applied to all time series,
depending on the metric type and value type of the original
time series. Alignment may change the metric type or the value
type of the time series. Time series data must be aligned in
order to perform cross-time series reduction. If
``crossSeriesReducer`` is specified, then ``perSeriesAligner``
must be specified and not equal ``ALIGN_NONE`` and
``alignmentPeriod`` must be specified; otherwise, an error is
returned.
cross_series_reducer:
The approach to be used to combine time series. Not all
reducer functions may be applied to all time series, depending
on the metric type and the value type of the original time
series. Reduction may change the metric type of value type of
the time series. Time series data must be aligned in order to
perform cross-time series reduction. If ``crossSeriesReducer``
is specified, then ``perSeriesAligner`` must be specified and
not equal ``ALIGN_NONE`` and ``alignmentPeriod`` must be
specified; otherwise, an error is returned.
group_by_fields:
The set of fields to preserve when ``crossSeriesReducer`` is
specified. The ``groupByFields`` determine how the time series
are partitioned into subsets prior to applying the aggregation
function. Each subset contains time series that have the same
value for each of the grouping fields. Each individual time
series is a member of exactly one subset. The
``crossSeriesReducer`` is applied to each subset of time
series. It is not possible to reduce across different resource
types, so this field implicitly contains ``resource.type``.
Fields not specified in ``groupByFields`` are aggregated away.
If ``groupByFields`` is not specified and all the time series
have the same resource type, then the time series are
aggregated into a single output time series. If
``crossSeriesReducer`` is not defined, this field is ignored.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.Aggregation)
),
)
_sym_db.RegisterMessage(Aggregation)
DESCRIPTOR._options = None
_SERVICETIER._options = None
# @@protoc_insertion_point(module_scope)
| 37.52086 | 2,966 | 0.649618 |
67cb81cd36905575e8c3450a86a1234449137fbb | 42,410 | py | Python | src/fastbt/experimental.py | webclinic017/fastbt | 715982cc454ee6fabcaa605188fd1aad7a32a376 | [
"MIT"
] | 1 | 2020-07-05T00:42:40.000Z | 2020-07-05T00:42:40.000Z | src/fastbt/experimental.py | webclinic017/fastbt | 715982cc454ee6fabcaa605188fd1aad7a32a376 | [
"MIT"
] | null | null | null | src/fastbt/experimental.py | webclinic017/fastbt | 715982cc454ee6fabcaa605188fd1aad7a32a376 | [
"MIT"
] | null | null | null | """
This is an experimental module.
Everything in this module is untested and probably incorrect.
Don't use them.
This is intended to be a place to develop new functions instead of
having an entirely new branch
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from numba import jit, njit
import os
from fastbt.utils import multi_args
import inspect
class DataSource:
    """
    Lightweight stand-in for the intake ``DataSource`` base class.

    It exists only so this module imports quickly without pulling in
    intake; the catalog classes below should eventually move to their
    own module, at which point this placeholder goes away.
    """
    def __init__(self):
        # Intentionally a no-op -- the real behaviour lives in intake.
        pass
@jit
def v_cusum(array):
    """
    Calculate cusum - numba version

    array
        numpy array
    returns
        tuple of (pos, neg) lists: the running sums of the positive and
        negative differences, each starting at 0 so that their length
        equals ``len(array)`` (matching the pandas ``cusum`` function)
    """
    pos = [0]
    neg = [0]
    pos_val = 0
    neg_val = 0
    # np.diff already returns len(array)-1 elements with no leading NaN
    # (unlike pandas Series.diff), so no extra slicing is needed; the
    # previous ``[1:]`` discarded the first real difference.
    d = np.diff(array)
    for i in d:
        if i >= 0:
            pos_val += i
        else:
            neg_val += i
        pos.append(pos_val)
        neg.append(neg_val)
    return (pos, neg)
@jit
def sign_change(array):
    """
    Calculate the sign change in an array
    If the current value is positive and previous value negative, mark as 1.
    If the current value is negative and previous value positive, mark as -1.
    In case of no change in sign, mark as 0

    array
        numpy 1d array
    returns
        numpy array of the same length with values in {1, -1, 0}
    """
    L = len(array)
    arr = np.empty(L)
    # The first element has no predecessor, so no sign change by definition.
    arr[0] = 0
    for i in range(1, L):
        # TO DO: Condition not handling edge case -- a value of exactly 0
        # satisfies both ``>= 0`` and ``<= 0``, so zeros register as a
        # crossing in whichever branch is evaluated first.
        if (array[i] >= 0) & (array[i-1] < 0):
            arr[i] = 1
        elif (array[i] <= 0) & (array[i-1] > 0):
            arr[i] = -1
        else:
            arr[i] = 0
    return arr
def cusum(array):
    """
    Calculate the cusum of a series.

    array
        pandas Series with a timestamp or datetime index
    returns
        DataFrame indexed like ``array`` with columns
        ``pos`` / ``neg`` (running sums of up and down moves, ``neg``
        as an absolute value), ``d`` (pos - neg), ``reg`` (regime
        change via :func:`sign_change`) and ``ratio`` (pos / neg)
    """
    # Drop the leading NaN that pandas Series.diff produces.
    diffs = array.diff()[1:]
    up_total = 0
    down_total = 0
    ups = [0]
    downs = [0]
    for delta in diffs:
        if delta >= 0:
            up_total += delta
        else:
            down_total += delta
        ups.append(up_total)
        downs.append(down_total)
    result = pd.DataFrame({'pos': ups, 'neg': downs}, index=array.index)
    result['neg'] = result['neg'].abs()
    result['d'] = result['pos'] - result['neg']
    result['reg'] = sign_change(result.d.values)
    result['ratio'] = result['pos'] / result['neg']
    return result
def percentage_bar(data, step):
"""
Generate the number of timesteps taken for each
equivalent step in price
data
numpy 1d array
step
step size
"""
start = data[0]
nextStep = start + step
counter = 0
steps = [start]
period = [0]
for d in data:
if step >= 0:
if d > nextStep:
steps.append(nextStep)
period.append(counter)
nextStep += step
counter = 0
else:
counter+=1
elif step < 0:
if d < nextStep:
steps.append(nextStep)
period.append(counter)
nextStep += step
counter = 0
else:
counter+=1
# Final loop exit
steps.append(nextStep)
period.append(counter)
return (steps, period)
def high_breach(s):
    """
    Given a series of values, returns a series
    with consecutive highs as values and timestamp as index

    s
        series with timestamp as index
    returns
        pandas Series whose values are each successive new high and
        whose index is the timestamp at which it occurred
    """
    highs = []
    ts = []
    # Start from -inf so series whose values are all <= 0 are handled;
    # the previous initial value of 0 silently dropped non-positive highs.
    max_val = -float('inf')
    index = s.index.values
    for i, v in enumerate(s.values):
        if v > max_val:
            highs.append(v)
            ts.append(index[i])
            max_val = v
    return pd.Series(highs, index=ts)
def low_breach(s):
    """
    Given a series of values, returns a series
    with consecutive lows as values and timestamp as index

    s
        series with timestamp as index
    returns
        pandas Series whose values are each successive new low and
        whose index is the timestamp at which it occurred
    """
    lows = []
    ts = []
    # Start from +inf so the first value is always recorded; the previous
    # sentinel of 1e+9 failed for series whose values exceed it.
    min_val = float('inf')
    index = s.index.values
    for i, v in enumerate(s.values):
        if v < min_val:
            lows.append(v)
            ts.append(index[i])
            min_val = v
    return pd.Series(lows, index=ts)
class ExcelSource(DataSource):
    # An intake-style data source where each worksheet of an Excel
    # workbook is one partition.
    # NOTE(review): ``Schema`` (used in ``_get_schema``) and
    # ``_load_metadata`` (used by the read methods) are not defined in this
    # module -- presumably they come from the real intake base class that the
    # dummy ``DataSource`` above temporarily replaces.  Confirm before use.
    container = 'dataframe'
    name = 'excel_loader'
    version = '0.0.1'
    partition_access = True
    def __init__(self, datapath, metadata=None):
        """
        Initialize with datapath and metadata
        datapath
            filename with entire path
        metadata
            optional metadata forwarded to the base class
        """
        self.filename = datapath
        # Keep the workbook handle open so partitions can be parsed lazily.
        self._source = pd.ExcelFile(self.filename)
        # NOTE(review): the dummy ``DataSource.__init__`` in this module takes
        # no ``metadata`` argument, so this call only works against the real
        # intake base class -- verify which base is intended.
        super(ExcelSource, self).__init__(metadata=metadata)
    def _get_schema(self):
        # One partition per worksheet; the sheet names are surfaced through
        # the schema's extra metadata.
        sheets = self._source.sheet_names
        return Schema(
            datashape=None,
            dtype=None,
            shape=None,
            npartitions= len(sheets),
            extra_metadata = {'sheets': sheets}
        )
    def read_partition(self, sheet, **kwargs):
        """
        Read a specific sheet from the list of sheets
        sheet
            sheet to read
        kwargs
            kwargs to the excel parse function
        """
        self._load_metadata()
        if sheet in self.metadata.get('sheets', []):
            return self._source.parse(sheet, **kwargs)
        else:
            # Unknown sheet: a message string is returned instead of raising.
            return 'No such sheet in the Excel File'
    def read(self, **kwargs):
        """
        Read all sheets into a single dataframe.
        Sheetname is added as a column
        kwargs
            kwargs to the excel parse function
        """
        self._load_metadata()
        sheets = self.metadata.get('sheets')
        collect = []
        # NOTE(review): when the workbook has a single sheet the guard below
        # skips the loop and the method implicitly returns None -- confirm
        # whether single-sheet workbooks should be concatenated too.
        if len(sheets) > 1:
            for sheet in sheets:
                temp = self.read_partition(sheet, **kwargs)
                temp['sheetname'] = sheet
                collect.append(temp)
            return pd.concat(collect, sort=False)
    def _close(self):
        # Release the underlying workbook file handle.
        self._source.close()
class HDFSource(DataSource):
    """
    A simple intake container to load data from
    HDF5 fixed formats

    A single HDF5 file is treated as one source; a directory is scanned
    recursively and each matching file becomes a partition.
    """
    # NOTE(review): ``Schema`` (used in ``_get_schema``) and
    # ``_load_metadata`` (used by the read methods) are not defined in this
    # module -- presumably provided by the real intake base class that the
    # dummy ``DataSource`` above replaces.  Confirm before use.
    container = 'dataframe'
    name = 'HDF5_fixed_loader'
    version = '0.0.1'
    partition_access = True
    def __init__(self, datapath, metadata=None, extension='h5'):
        """
        Initialize with datapath and metadata
        datapath
            filename or directory
            If the filename ends in any of the extensions given in the
            extension argument, then it is treated as a HDF5 file
        metadata
            optional metadata forwarded to the base class
        extension
            file extension used to recognise HDF5 files
        """
        self.source = datapath
        self._ext = extension
        # Check whether the given path is a directory or file
        if os.path.exists(datapath):
            if os.path.isfile(datapath):
                self._source_type = 'file'
            else:
                self._source_type = 'directory'
        else:
            # NOTE(review): on an invalid path the constructor only prints
            # and returns early, leaving the instance half-initialised
            # (no ``_source_type``) -- raising would be safer.
            print('Not a valid file or directory')
            return
        self._get_schema()
        # NOTE(review): the dummy ``DataSource.__init__`` in this module
        # takes no ``metadata`` argument, so this call only works against
        # the real intake base class -- verify which base is intended.
        super(HDFSource, self).__init__(metadata=metadata)
    def _get_schema(self):
        metadata = {
            'ext': self._ext,
            'src': self.source,
            'type': self._source_type
        }
        file_dict = {}
        if self._source_type == 'directory':
            # Walk the tree and record every file whose name ends with the
            # configured extension, keyed by bare filename.
            for root,directory,files in os.walk(self.source):
                for file in files:
                    filename = os.path.join(root, file)
                    if filename.endswith(self._ext):
                        file_dict[file] = filename
            metadata.update({'files': file_dict})
        return Schema(
            datashape=None,
            dtype=None,
            shape=None,
            npartitions=len(file_dict),
            extra_metadata = metadata
        )
    def read_partition(self, file, **kwargs):
        """
        Read a specific sheet from the list of sheets
        file
            filename without extension
        kwargs
            kwargs to the excel parse function
        """
        self._load_metadata()
        ext = self.metadata.get('ext', self._ext)
        srctype = self.metadata.get('type')
        # A single-file source ignores the requested partition name.
        if srctype == 'file':
            return pd.read_hdf(self.metadata.get('src'))
        filename = '{file}.{ext}'.format(file=file, ext=ext)
        if filename in self.metadata.get('files', []):
            filepath = self.metadata['files'][filename]
            return pd.read_hdf(filepath)
        else:
            # Unknown file: a message string is returned instead of raising.
            return 'No such HDF file'
    def read(self, **kwargs):
        """
        Read all sheets into a single dataframe.
        Sheetname is added as a column
        kwargs
            kwargs to the excel parse function
        """
        self._load_metadata()
        if self.metadata.get('type') == 'file':
            return pd.read_hdf(self.metadata.get('src'))
        else:
            return 'The datasource is a directory.Use the read_partition method to read a specific file.'
    def _close(self):
        # NOTE(review): closing is a stub; there is no persistent handle to
        # release since each read opens the HDF file independently.
        print('Not implemented')
def twin_plot(data, y_axis, x_axis='timestamp'):
    """
    Build a bokeh line chart with two independent y axes.

    data
        dataframe holding the columns to plot
    y_axis
        pair of column names; the first is drawn in red against the
        default range, the second in blue against a secondary range
    x_axis
        column holding the datetime x values
    returns
        the configured bokeh figure (click a legend entry to hide a line)
    """
    from bokeh.plotting import figure
    from bokeh.models import LinearAxis, Range1d
    TOOLTIPS = [
        ('datetime', '@x{%F %H:%M}'),
        ('value', '$y{0.00}')
    ]
    left_col, right_col = y_axis[0], y_axis[1]
    # Bounds for each series define its own y range.
    left_lo, left_hi = data[left_col].min(), data[left_col].max()
    right_lo, right_hi = data[right_col].min(), data[right_col].max()
    fig = figure(x_axis_type='datetime', y_range=(left_lo, left_hi),
                 tooltips=TOOLTIPS, height=240, width=600)
    fig.line(data[x_axis].values, data[left_col].values,
             color="red", legend=left_col)
    # Secondary axis registered under the range name "foo".
    fig.extra_y_ranges = {"foo": Range1d(right_lo, right_hi)}
    fig.line(data[x_axis], data[right_col].values, color="blue",
             y_range_name="foo", legend=right_col)
    fig.add_layout(LinearAxis(y_range_name="foo", axis_label=right_col), 'left')
    fig.hover.formatters = {'x': 'datetime'}
    fig.legend.location = 'top_center'
    fig.legend.click_policy = 'hide'
    return fig
def conditional(data, c1, c2, out=None):
    """
    Build a conditional table from a dataframe.

    data
        dataframe
    c1
        base condition as a ``df.query`` string
    c2
        list of follow-up conditions as ``df.query`` strings, each
        evaluated on the rows that already satisfy ``c1``
    out
        function of one dataframe argument applied to each filtered
        frame; defaults to ``len`` so plain counts are returned
    returns
        dict mapping each condition string to the metric value

    Note
    ----
    1. The dataframe is queried with c1 and each of the conditions
    in c2 are evaluated based on this result.
    2. All conditions are evaluated using `df.query`
    3. The condition strings should be valid columns in the dataframe
    4. The function passed should have a single argument, the dataframe.
    """
    metric = len if out is None else out
    subset = data.query(c1)
    result = {c1: metric(subset)}
    for cond in c2:
        result[cond] = metric(subset.query(cond))
    return result
class Catalog:
    """
    An intake catalog creator.

    The catalog is created in the following manner:

    1. All files in the root directory are considered to be separate
       files and loaded as such.
    2. All directories and subdirectories inside the root directory are
       considered to be separate data sources.
    3. Each of the files is matched against the extension name and the
       corresponding mapper.
    4. Files inside a directory are randomly selected and the file type
       is determined for the entire directory.
       **It's assumed that all files inside any sub-directories are of
       the same file type.**
    """

    def __init__(self, directory):
        """
        directory
            directory to search for files
        """
        self._directory = directory
        # Every file found inside these directory names becomes its own
        # individual data source.
        self._file_dirs = ['files']
        # Each logical file type maps to a single intake driver but may
        # carry several extensions; flatten to extension -> driver.
        filetypes = {
            'excel': {
                'driver': 'fastbt.experimental.ExcelSource',
                'extensions': ['xls', 'xlsx']
            },
            'csv': {
                'driver': 'intake.source.csv.CSVSource',
                'extensions': ['csv', 'txt']
            },
            'hdf': {
                'driver': 'fastbt.experimental.HDFSource',
                'extensions': ['h5', 'hdf5']
            }
        }
        self._mappers = {ext: spec['driver']
                         for spec in filetypes.values()
                         for ext in spec['extensions']}

    def generate_catalog(self):
        """
        Generate the catalog dictionary.

        #TO DO#
        1. Replace multiple dots with underscores in filenames
        """
        catalog = {'sources': {}}
        sources = catalog['sources']

        def entry(arg_name, path, ext, mode):
            # Build a single catalog entry for one data source.
            return {
                'args': {arg_name: path},
                'driver': self._mappers[ext],
                'description': '',
                'metadata': {'extension': ext, 'mode': mode}
            }

        for dirpath, _dirs, filenames in os.walk(self._directory):
            dirname = dirpath.split('/')[-1]
            if dirname in self._file_dirs:
                # each file here is registered individually
                for filename in filenames:
                    ext = filename.split('.')[-1]
                    arg_name = 'urlpath' if 'csv' in ext else 'datapath'
                    if ext in self._mappers:
                        sources[filename.split('.')[0]] = entry(
                            arg_name, os.path.join(dirpath, filename), ext, 'file')
            elif filenames:
                # the whole directory is one source; the first file's
                # extension decides the driver for the entire directory
                ext = filenames[0].split('.')[-1]
                if 'csv' in ext:
                    arg_name, pattern = 'urlpath', '*'
                else:
                    arg_name, pattern = 'datapath', ''
                if ext in self._mappers:
                    sources[dirname] = entry(
                        arg_name, os.path.join(dirpath, pattern), ext, 'dir')
        return catalog
def candlestick_plot(data):
    """
    return a bokeh candlestick plot

    data
        dataframe with open, high, low and close columns, plus a date
        column (converted to datetime here)

    Returns a (figure, ColumnDataSource) tuple so the caller can update
    the source later.

    Note
    -----
    Prototype copied from the below link
    https://bokeh.pydata.org/en/latest/docs/gallery/candlestick.html
    """
    from math import pi
    from bokeh.plotting import figure, show, output_file
    from bokeh.models import ColumnDataSource
    df = data.copy()
    df["date"] = pd.to_datetime(df["date"])
    # green body when close > open, red otherwise
    df['color'] = ['green' if x > y else 'red' for (x, y) in
                   zip(df.close, df.open)]
    source = ColumnDataSource()
    source.data = source.from_df(df)
    # bar width on a datetime axis, in milliseconds; 10*60*1000 ms is
    # 10 minutes (the original comment claiming "half day" was wrong)
    w = 10 * 60 * 1000
    TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
    p = figure(x_axis_type="datetime", tools=TOOLS,
               title="Candlestick", plot_width=800,
               tooltips=[
                   ('date', '@date{%F}'),
                   ('open', '@open{0}'),
                   ('high', '@high{0}'),
                   ('low', '@low{0}'),
                   ('close', '@close{0}')
               ])
    p.hover.formatters = {'date': 'datetime'}
    p.xaxis.major_label_orientation = pi / 4
    p.grid.grid_line_alpha = 0.3
    # wicks (high-to-low segments), then candle bodies (open-to-close bars)
    p.segment('date', 'high', 'date', 'low',
              color="black", source=source)
    p.vbar('date', w, 'open', 'close',
           fill_color='color', line_color="black", source=source)
    return p, source
def calendar_plot(data, field='ret'):
    """
    return a calendar plot (year on x, month on y, value as cell color)

    data
        dataframe with year and month columns
    field
        field to plot values

    Note
    -----
    Prototype copied from bokeh gallery
    https://bokeh.pydata.org/en/latest/docs/gallery/unemployment.html
    """
    from math import pi
    from bokeh.models import LinearColorMapper, BasicTicker, PrintfTickFormatter, ColorBar
    from bokeh.plotting import figure
    from bokeh.palettes import Spectral
    df = data.copy()
    # Type conversion: categorical axes require string values
    df['year'] = df.year.astype('str')
    df['month'] = df.month.astype('str')
    years = list(df.year.unique())
    months = [str(x) for x in range(12, 0, -1)]
    # Map the field's value range onto a reversed 6-color spectral palette
    colors = list(reversed(Spectral[6]))
    mapper = LinearColorMapper(palette=colors,
                               low=df[field].min(), high=df[field].max())
    TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom"
    p = figure(title="Calendar Plot",
               x_range=years, y_range=list(reversed(months)),
               x_axis_location="above", plot_width=1000, plot_height=600,
               tools=TOOLS, toolbar_location='below',
               tooltips=[('date', '@year-@month'), ('return', '@{}'.format(field))])
    # Axis settings
    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    p.axis.major_label_text_font_size = "10pt"
    p.axis.major_label_standoff = 0
    p.xaxis.major_label_orientation = pi / 3
    # One colored rectangle per (year, month) cell
    p.rect(x="year", y="month", width=1, height=1,
           source=df, fill_color={'field': field, 'transform': mapper},
           line_color='black', line_width=0.5, line_alpha=0.3)
    color_bar = ColorBar(color_mapper=mapper, major_label_text_font_size="8pt",
                         ticker=BasicTicker(desired_num_ticks=len(colors)),
                         formatter=PrintfTickFormatter(format="%d%%"),
                         label_standoff=6, border_line_color=None, location=(0, 0))
    p.add_layout(color_bar, 'right')
    return p
def summary_plot(data):
    """
    Given a dataframe, create a widget with condition and list
    of columns to provide summary data

    data
        dataframe

    Returns a ``document(doc)`` function suitable for a bokeh server app.

    Note
    -----
    * Condition is a valid eval string supported by pandas
    * Multiple columns can be selected
    * Summary statistics provided by the describe method
    """
    from bokeh.models import (
        TextInput, Button, Select, MultiSelect,
        ColumnDataSource, PreText, Paragraph
    )
    from bokeh.plotting import figure
    from bokeh.layouts import row, column, layout
    df = data.copy()

    def document(doc):
        # Widgets: free-text pandas query, column picker, update trigger
        condition = TextInput(title='Enter your condition')
        col = MultiSelect(title='Select the columns', options=list(df.columns))
        button = Button(label='Update')
        pre = PreText()
        pre.text = condition.value

        def update():
            # Re-run the query and display describe() for the chosen columns
            cond = condition.value
            cols = col.value
            text = df.query(cond)[cols].describe()
            pre.text = str(text)

        button.on_click(update)
        l = layout([
            [[condition, button], col],
            pre
        ])
        doc.add_root(l)

    return document
def slider_plot(data, cols):
    """
    Given a dataframe, create a widget with range slider

    data
        dataframe
    cols
        columns for which sliders are to be created

    Returns a ``document(doc)`` function suitable for a bokeh server app.
    """
    from bokeh.models import (
        TextInput, Button, RangeSlider, MultiSelect,
        ColumnDataSource, PreText
    )
    from bokeh.plotting import figure
    from bokeh.layouts import row, column, layout, gridplot
    df = data.copy()

    def document(doc):
        sliders = []
        pre = PreText(text='something')
        select = MultiSelect(title='Select the columns', options=list(df.columns))
        button = Button(label='Update')
        # One range slider per requested column, spanning its min..max
        # in 100 steps
        for col in cols:
            MIN, MAX = df[col].min(), df[col].max()
            STEP = (MAX - MIN) / 100
            slider = RangeSlider(start=MIN, end=MAX, step=STEP,
                                 value=(MIN, MAX), title=col)
            sliders.append(slider)

        def update():
            # Combine every slider's (low, high) window into one query
            values = []
            txt = '({col} > {low}) & ({col} < {high})'
            for slider in sliders:
                low, high = slider.value
                low, high = float(low), float(high)
                formula = txt.format(col=slider.title, low=low, high=high)
                values.append(formula)
            q = '&'.join(values)
            summary_cols = select.value
            text = df.query(q)[summary_cols].describe()
            pre.text = str(text)

        button.on_click(update)
        l = layout(
            [column(sliders), [select, button]],
            pre)
        doc.add_root(l)

    return document
def run_simulation(data, size=0.3, num=1000, column=None, function=np.mean):
    """
    Run a simulation on the given data by drawing repeated random samples
    and evaluating a metric on each draw.

    data
        a pandas dataframe
    size
        sample size - float/int.
        A float below 1 is treated as a fraction of the dataframe length;
        any other value is used directly as the number of samples to take.
    num
        number of simulations to be run
    column
        column from which samples are taken. If None, the entire
        dataframe is sampled, which is considerably slower.
    function
        function evaluated on each sample (default numpy mean)

    Returns a pandas Series with the result for each sample taken.
    """
    draws = size if size >= 1 else int(len(data) * size)
    if column is None:
        # Slow path: sample whole rows of the dataframe
        outcomes = [function(data.sample(draws)) for _ in range(num)]
    else:
        # Fast path: numpy sampling on one column's values
        values = data[column].values
        outcomes = [function(np.random.choice(values, draws)) for _ in range(num)]
    return pd.Series(outcomes)
def generate_parameters(dict_of_parameters):
    """
    Generate a list of parameters for a backtest function

    dict_of_parameters
        dictionary of parameters; each value is a list of options, a
        plain string, or a one-level nested dictionary of sub-options

    Note
    -----
    Only one level of nested dictionary is parsed
    """
    from itertools import product
    params = dict_of_parameters.copy()
    result = []

    def _expand(sub, base):
        # Expand one nested dictionary against a fixed base mapping.
        # TO DO: Think of a better way -> recursion
        options = []
        for key, val in sub.items():
            if isinstance(val, list):
                options.append([{key: item} for item in val])
            elif isinstance(val, str):
                options.append([{key: val}])
        combos = []
        for combo in product(*options):
            merged = dict(base)
            for piece in combo:
                merged.update(piece)
            combos.append(merged)
        return combos

    for key, val in params.items():
        if isinstance(val, list):
            result.append([{key: item} for item in val])
        elif isinstance(val, str):
            result.append([{key: val}])
        elif isinstance(val, dict):
            expanded = []
            for subkey in val:
                expanded.extend(_expand(params[key][subkey], {key: subkey}))
            result.append(expanded)
    return result
@njit
def traverse(high, low, points):
    """
    See whether the price points are hit in the given order

    high
        high values as numpy array
    low
        low values as numpy array
    points
        list or numpy array of values to check

    returns a 4-tuple with
        1. an array indicating whether the values in
           the points list is hit; 1 indicates the value is hit
        2. an array of timesteps indicating when the
           value is hit; this is just the iteration cycle
        3. high value when the last point is hit or the
           end of the iteration cycle if the point is not hit
        4. low value when the last point is hit or the
           end of the iteration cycle if the point is not hit

    Note
    ----
    1. This function checks whether the points are hit
       in the given order.
    2. A point is considered hit if it is strictly between the bar's
       high and low values
    3. high and low are assumed to be arrays of equal size with
       high[i] >= low[i] at every timestep
    """
    j = 0
    price = points[j]
    hit = np.zeros(len(points))
    timesteps = np.zeros(len(points))
    for i in range(len(high)):
        # strict inequality: touching exactly high/low does not count
        if low[i] < price < high[i]:
            hit[j] = 1
            timesteps[j] = i
            j += 1
            if j == len(points):
                # all points hit; stop scanning
                break
            else:
                price = points[j]
    # NOTE(review): if len(high) == 0 the loop never runs and `i` is
    # undefined here; callers appear to always pass non-empty arrays —
    # confirm.
    return (hit, timesteps, high[i], low[i])
class Strategy:
    """
    An automated strategy implementation class
    """

    def __init__(self, **kwargs):
        """
        keyword arguments

        result_cols
            column names for any extra value emitted from the tradebook
            function. The first 4 columns are automatically named
            entry_time, entry_price, exit_time and exit_price and must
            correspond accordingly
        """
        self.datas = []
        self.result_cols = kwargs.pop('result_cols', None)

    @staticmethod
    def tradebook(open, high, low, close, **kwargs):
        """
        Numpy tradebook implementation with logic.
        To be implemented for each case

        Note
        -----
        1) Should always return an array of values
        2) The array must be a one-dimensional array
        3) The first 4 values in the array must be
           entry time, entry price, exit time and exit price
        4) It is assumed only one trade is done each day
        5) For multiple trades, modify the result function
        """
        raise NotImplementedError

    def _agged(self, times, data=None):
        """
        Get the aggregated data for the given times

        data
            intraday dataframe. If None, self.datas[0] is picked
        times
            list of 2-tuples with start and end times

        Note
        -----
        1) For each time tuple given in the list, data is aggregated
           in OHLC form and added to a dataframe
        2) Columns are numbered based on the order of the times list, so
           the first tuple is named 0, the next 1, and so on
        3) Expects a date and timestamp column in the data
        """
        from fastbt.utils import recursive_merge
        agged = {
            'open': 'first',
            'high': 'max',
            'low': 'min',
            'close': 'last'
        }
        dfs = []
        # Bug fix: `if not(data)` raised "truth value of a DataFrame is
        # ambiguous" whenever a dataframe was actually passed in.
        if data is None:
            data = self.datas[0]
        for i, (s, e) in enumerate(times):
            temp = data.set_index('timestamp').between_time(s, e).groupby('date').agg(agged)
            columns = {x: '{x}{i}'.format(x=x, i=i)
                       for x in ['open', 'high', 'low', 'close']}
            temp.rename(columns=columns, inplace=True)
            dfs.append(temp)
        return recursive_merge(dfs, on=['date']).reset_index()

    def _each_day(self, data=None, cols=None, **kwargs):
        """
        Runs the tradebook function on each day.

        data
            dataframe on which the tradebook simulation is run.
            In case of None, the last item in self.datas is used.
            The dataframe must have a date column and it must be sorted
            by timestamp
        cols
            columns to be passed to the tradebook function.
            The columns should be argument names in the tradebook function
        kwargs
            keyword arguments passed through to the tradebook function
        """
        # Bug fix: `if not(data)` is ambiguous for dataframes; compare
        # against None instead.
        if data is None:
            data = self.datas[-1]  # the last appended data
        grouped = data.groupby('date')
        if not cols:
            cols = ['open', 'high', 'low', 'close']
        tradebook = self.tradebook

        def tb(x):
            # map each requested column to its numpy values for this day
            dct = {c: x[c].values for c in cols}
            kwargs.update(dct)
            return tradebook(**kwargs)

        res = grouped.apply(tb)
        return res

    def result(self, **kwargs):
        """
        Result of the strategy.

        The output of the tradebook function is the input to this
        function. Converts the array into a dataframe and adds some
        useful columns.
        """
        tmp = self._each_day(**kwargs)

        def get_column_names():
            """Build the result column names from result_cols or by count."""
            cols = ['entry_time', 'entry_price', 'exit_time', 'exit_price']
            if self.result_cols:
                # Bug fix: removed a stray debug print of the column list.
                return cols + self.result_cols
            else:
                L = len(tmp.iloc[0])
                return cols + [f'col{i}' for i in range(4, L)]

        res = pd.DataFrame(tmp.values.tolist(),
                           index=tmp.index,
                           columns=get_column_names())
        res['year'] = res.index.year
        res['profit'] = res.eval('exit_price-entry_price')
        res['cum_p'] = res.profit.cumsum()
        res['max_p'] = res.cum_p.expanding().max()
        return res

    def metric(self):
        """
        Default metric to use (placeholder; returns None)
        """
        return

    def plot(self, data):
        """
        A general plotting function: cumulative/max profit curves plus a
        bar chart of profit per year.
        """
        data[['cum_p', 'max_p']].plot()
        plt.figure()
        data.groupby('year').profit.sum().plot.bar()

    def simulator(self, **kwargs):
        """
        Run simulation and return results

        kwargs
            keyword arguments where argument is the name of the variable
            and values as list the variable takes. The keyword variable
            is passed to the tradebook function apart from OHLC values

        (Not implemented yet.)
        """
        pass

    def run(self, plotting=True, **kwargs):
        """
        Run the entire process: build results and optionally plot them.
        """
        res = self.result(**kwargs)
        if plotting:
            self.plot(res)
        return res  # For further analysis
def advances(data, date='date', column=None, out='advances'):
    """
    Get the advances by day

    data
        dataframe with the necessary columns
    date
        date column in the dataframe to group by
    column
        column on which the ratio is calculated, usually a returns
        column. If not given, the close/open return is calculated
    out
        output format, one of advances/declines/difference/ratio/all;
        all returns everything
    """
    data = data.copy()
    if column is None:
        data['pret'] = data.eval('(close/open)-1')
        column = 'pret'
    col = 'is_' + column
    # 1 when the return is positive (advance), 0 otherwise (decline)
    data[col] = data.eval(f'{column}>0') + 0
    data2 = data.groupby([date, col]).size().unstack(fill_value=0)
    # Bug fix: make sure both outcome columns (0 and 1) exist even when
    # every row advanced (or declined); previously assigning two column
    # names to a single-column frame raised a ValueError.
    data2 = data2.reindex(columns=[0, 1], fill_value=0)
    data2.columns = ['declines', 'advances']
    data2['difference'] = data2.eval('advances-declines')
    data2['ratio'] = data2.eval('advances/declines')
    if out == 'all':
        return data2
    else:
        return data2[out]
class CodeGenerator:
    """
    Assemble python source files from jinja2 template blocks.

    `_struct` holds an ordered list of literal text and `{{name}}`
    placeholders; `_blocks` maps block names to the code substituted in.
    """

    def __init__(self, name, blocks=None):
        """
        name
            base name of the generated file (used by save_file)
        blocks
            optional initial mapping of block name -> code

        Bug fix: the `blocks` argument was previously accepted but
        silently ignored; it now seeds the internal blocks dictionary.
        """
        self._name = name
        self._struct = []
        self._blocks = dict(blocks) if blocks else {}
        self._block_names = []

    @property
    def name(self):
        return self._name

    def add_block(self, name, indent=False, n=4):
        """
        Add a code block placeholder to the structure

        name
            name of the code block
        indent
            whether to apply indentation to the code;
            only one level of indentation is added
        n
            number of spaces used when indent is True (default 4)

        Note
        -----
        1. name should be a key in the blocks dictionary
        """
        self._block_names.append(name)
        if indent:
            txt = '{{' + str(name) + ' | indent(' + str(n) + ', first=True)}}'
        else:
            txt = '{{' + str(name) + '}}'
        self._struct.append(txt)

    def add_text(self, text):
        """
        Add literal text

        text
            text to add; it is emitted without any variable interpolation
        """
        self._struct.append(text)

    def add_code_block(self, name, block):
        """
        Add a code block to the existing blocks dictionary

        name
            name of the block
        block
            actual code

        If name is already in the blocks dictionary, it is overwritten.
        """
        self._blocks[name] = block

    def clear(self):
        """
        Clear the existing structure (registered code in `_blocks` is kept).
        """
        self._struct = []
        self._block_names = []

    def generate_code(self):
        """Render the structure through jinja2 and return the source text."""
        from jinja2 import Template
        code = '\n'.join(self._struct)
        template = Template(code)
        substitution = {b: self._blocks.get(b) for b in self._block_names}
        return template.render(**substitution)

    def save_file(self, filename=None):
        """Write the generated code to `filename` (default `<name>.py`)."""
        if not filename:
            filename = self.name + '.py'
        code = self.generate_code()
        with open(filename, 'w') as f:
            f.write(code)
def renko_plot(data, bricks_col='brick'):
    """
    Draw a renko plot from the given dataframe

    data
        dataframe containing renko data
    bricks_col
        name of the bricks column; default brick

    Note
    -----
    Brick size is calculated from the bricks column automatically
    (distance between the first two brick values).
    """
    from bokeh.plotting import figure
    data = data.copy()
    brick_size = abs(data[bricks_col].iloc[0] - data[bricks_col].iloc[1])
    # one unit-wide quad per brick, stacked left to right
    data['left'] = range(len(data))
    data['right'] = data.left.values + 1
    data['top'] = data[bricks_col].values
    data['bottom'] = data.top + brick_size
    # Bug fix: the move/color computation hard-coded the `brick` column
    # and raised AttributeError for any non-default bricks_col value.
    data['move'] = (data[bricks_col] > data[bricks_col].shift(1)) + 0
    data['color'] = ['green' if x == 1 else 'red' for x in data.move.values]
    p = figure(title='Renko chart')
    p.quad(top='top', bottom='bottom', left='left', right='right', color='color',
           source=data)
    return p
class DayTrading:
    """
    Run an intraday tradebook function over each trading day and
    summarize the resulting trades.
    """

    def __init__(self, data=None, interval=None, tradebook=None, tradebook_args=None):
        """
        arguments to be passed to init

        data
            primary dataframe; this is the dataframe on which all work
            would be done
        interval
            interval as pandas dataframe
        tradebook
            a valid tradebook function
        tradebook_args
            extra arguments to be passed to tradebook

        Bug fix: tradebook_args previously defaulted to a mutable `{}`
        shared across all instances; None is now used as the sentinel.
        """
        self._interval = interval
        self._data = data
        self._sources = {}
        self._tradebook = tradebook
        self._tradebook_args = {} if tradebook_args is None else tradebook_args
        import pyfolio as pf
        self.pf = pf

    @property
    def data(self):
        return self._data

    @staticmethod
    def agged(data, interval='5min', column_name='timestamp'):
        """Resample OHLC data to the given interval with right-labelled bins."""
        return data.set_index(column_name).resample(interval, label='right').agg({
            'open': 'first',
            'high': 'max',
            'low': 'min',
            'close': 'last'
        })

    def _by_day(self):
        """
        Run the tradebook function on each day and collect all trades.
        """
        func_spec = inspect.getfullargspec(self._tradebook)
        columns = self.data.columns

        def f(data):
            # Match tradebook argument names against dataframe columns;
            # annotated arguments receive a scalar (the first value),
            # unannotated ones receive the full numpy array.
            kwargs = {}
            for arg in func_spec.args:
                if arg in columns:
                    if arg in func_spec.annotations.keys():
                        kwargs[arg] = data[arg].values[0]
                    else:
                        kwargs[arg] = data[arg].values
            kwargs.update(self._tradebook_args)
            return self._tradebook(**kwargs)

        grouped = self.data.groupby('date')
        tbs = grouped.apply(f)
        trades = []
        for v in tbs.values:
            trades.extend(v.all_trades)
        # Bug fix: removed unreachable code after this return (the
        # original marked it "excess to be corrected").
        return trades

    def _convert_to_legs(self, result=None):
        """
        Convert trades to daily legs for better summary

        result
            a tradebook with alternate buy and sell trades
        """
        if result is None:
            result = self._result
        # Pair consecutive trades: even index = entry, odd index = exit
        x = range(0, len(result), 2)
        y = range(1, len(result), 2)
        trds = []
        for m, n in zip(x, y):
            a = result[m]
            b = result[n]
            d = {}
            d['symbol'] = a['symbol']
            d['order'] = a['order']
            d['entry_time'] = a['ts']
            d['entry_price'] = a['price']
            d['qty'] = a['qty']
            d['exit_time'] = b['ts']
            d['exit_price'] = b['price']
            trds.append(d)
        trds = pd.DataFrame(trds)
        trds['entry_time'] = pd.to_datetime(trds.entry_time)
        trds['exit_time'] = pd.to_datetime(trds.exit_time)
        trds['hour'] = trds.entry_time.dt.hour
        trds['date'] = pd.to_datetime(trds.entry_time.dt.date)
        trds['pnl'] = trds.eval('(exit_price-entry_price)*qty')
        return trds

    @property
    def summary(self):
        return self._summary

    def add_source(self, name: str, data):
        """
        adds a data source to the existing sources.
        you could access this as an attribute

        name
            the name of the data source
        data
            actual data as a pandas dataframe
            (bug fix: was wrongly annotated as `str`)

        Note
        -----
        This is just a convenient function to add a datasource as an
        attribute
        """
        self._sources[name] = data
        setattr(self, name, data)

    def perf_stats(self, cost=0, capital=1000):
        """Pyfolio performance stats on per-day pnl net of cost over capital."""
        returns = (self.summary.set_index('date').pnl - cost) / capital
        return self.pf.timeseries.perf_stats(returns)

    def drawdown_table(self, cost=0, capital=1000):
        """Pyfolio drawdown table on per-day pnl net of cost over capital."""
        returns = (self.summary.set_index('date').pnl - cost) / capital
        return self.pf.timeseries.gen_drawdown_table(returns)

    def run(self):
        """Execute the tradebook over all days and build the summary."""
        print('Started running the program')
        self._result = self._by_day()
        summary = self._convert_to_legs()
        self._summary = summary
def single_filter(frame, col1, col2, func=np.mean):
    """
    Create a single filter and return the results as a dictionary

    frame
        dataframe
    col1
        column on which the filter is to be applied
    col2
        list of columns, each being a categorical to group by
    func
        arbitrary function applied to each group (default numpy mean)

    Returns a dict mapping each grouping column to its per-group results.
    """
    return {
        group_col: frame.groupby(group_col)[col1].apply(func)
        for group_col in col2
    }
@njit
def clean_ticks(price, threshold=10):
    """
    Clean out of sample ticks

    price
        numpy array of tick prices
    threshold
        maximum allowed jump from the last accepted price

    Returns the accepted (non-zero) prices; rejected ticks and the first
    element are dropped.
    """
    length = len(price)
    arr = np.zeros(length)
    s = price[0]       # last accepted reference price
    dropped = 0        # count of rejected ticks (not returned)
    nobs = 0           # consecutive rejections since the last accept
    for i in np.arange(1, length):
        if np.abs(price[i] - s) < threshold:
            arr[i] = price[i]
            s = price[i]
            nobs = 0
        else:
            nobs += 1
            dropped += 1
            if nobs > 10:
                # After more than 10 straight rejections, treat the new
                # level as genuine and move the reference there.
                # NOTE(review): nobs is not reset here, so every further
                # rejected tick also moves the reference — confirm intended.
                s = price[i]
    # zeros mark dropped ticks (and index 0, which is never written)
    return arr[arr > 0]
class WalkForward:
    """
    Walk-forward analysis: repeatedly builds a training window of `lb`
    rows followed by a test window of `rb` rows, and computes per-group
    statistics for a chosen column in each window.
    """

    def __init__(self, data, lb=120, rb=30, factor=None,
                 column=None):
        """
        data
            dataframe to analyze
        lb
            look-back (training) window length in rows
        rb
            re-balance (test) window length in rows
        factor
            categorical column to group by
        column
            numeric column under test
        """
        from collections import defaultdict, namedtuple
        self.data = data
        self.lb = lb
        self.rb = rb
        self._splits = defaultdict(list)
        self._factor = factor
        self._column = column
        self._results = []   # per-train-window group stats (filled by run)
        self.conf = []       # confidence results (filled by run_conf)
        self.forward = []    # per-test-window group stats (filled by run)

    @property
    def factor(self):
        return self._factor

    @property
    def column(self):
        return self._column

    def get_splits(self):
        # mapping with 'train' and 'test' lists of dataframe windows
        return self._splits

    def _generate_splits(self):
        # Slide over the data in steps of rb: each step trains on the
        # previous lb rows and tests on the next rb rows.
        lb, rb = self.lb, self.rb
        indexes = range(lb, len(self.data), rb)
        for index in indexes:
            train = self.data.iloc[index - lb:index]
            test = self.data.iloc[index:index + rb]
            self._splits['train'].append(train)
            self._splits['test'].append(test)

    def set_factor(self, factor):
        """
        The factor should be a categorical column in data
        You can also provide a list of category columns
        """
        # NOTE(review): `factor in self.data.columns` is False for a
        # *list* of columns even though the docstring allows one — confirm.
        if factor in self.data.columns:
            self._factor = factor
            return self._factor
        else:
            return 'Factor not found'

    def set_column(self, column):
        """
        The column to test
        """
        if column in self.data.columns:
            self._column = column
            return self._column
        else:
            return 'Column not found'

    def run(self):
        """
        Run all tests
        """
        fac = self.factor
        col = self.column
        self._generate_splits()
        splits = self.get_splits()
        # For every window, record group size and mean of the tested column
        for train in splits['train']:
            t1 = train.groupby(fac)[col].agg(['size', 'mean']).to_dict('index')
            self._results.append(t1)
        for test in splits['test']:
            t2 = test.groupby(fac)[col].agg(['size', 'mean']).to_dict('index')
            self.forward.append(t2)

    def run_conf(self):
        """
        Run confidence test
        """
        print('Running conf')
        train = self.get_splits()['train']
        results = self._results
        for data, res in zip(train, results):
            tup = []
            for k, v in res.items():
                # Draw random samples of the same size and measure how
                # often the simulated mean exceeds the observed group mean
                s = run_simulation(data, size=v['size'], column=self.column)
                # run_simulation defaults to num=1000 draws, hence /1000
                conf = len(s[s > v['mean']]) / 1000
                tup.append((k, v['mean'], conf))
            self.conf.append(tup)
| 30.75417 | 105 | 0.557392 |
0ce6cd86f7a9c3528be90b3f67452c9a6e5e2286 | 2,703 | py | Python | lambdas/run_lambda.py | fiskus/quilt | a5945a111a3065ecd23e64d069aa67e42492c5f2 | [
"Apache-2.0"
] | null | null | null | lambdas/run_lambda.py | fiskus/quilt | a5945a111a3065ecd23e64d069aa67e42492c5f2 | [
"Apache-2.0"
] | null | null | null | lambdas/run_lambda.py | fiskus/quilt | a5945a111a3065ecd23e64d069aa67e42492c5f2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from base64 import b64decode, b64encode
from http.server import BaseHTTPRequestHandler, HTTPServer
import sys
from urllib.parse import urlparse, parse_qsl, unquote
from index import lambda_handler
PORT = 8080
LAMBDA_PATH = '/lambda'
class Handler(BaseHTTPRequestHandler):
    """HTTP request handler that forwards requests under LAMBDA_PATH to
    the local lambda_handler, mimicking the API-Gateway proxy event shape.
    """

    def _handle_request(self, req_body):
        """Translate the request into a lambda event, invoke the handler,
        and write its response back to the client.

        req_body: raw request body bytes, or None for body-less methods.
        """
        parsed_url = urlparse(self.path)
        path = unquote(parsed_url.path)
        if path == LAMBDA_PATH or path.startswith(LAMBDA_PATH + '/'):
            query = dict(parse_qsl(parsed_url.query, keep_blank_values=True))
            # BaseHTTPRequestHandler API gives us a case-insensitive dict
            # of headers, while the lambda API uses lowercase header names.
            # So change the keys to lowercase to match the lambda API.
            headers = {k.lower(): v for k, v in self.headers.items()}
            # Build the proxy-style event dict expected by lambda_handler;
            # the body is always base64-encoded.
            args = {
                'httpMethod': self.command,
                'path': path,
                'pathParameters': {
                    'proxy': path[len(LAMBDA_PATH) + 1:]
                },
                'queryStringParameters': query or None,
                'headers': headers or None,
                'body': b64encode(req_body or b''),
                'isBase64Encoded': True
            }
            result = lambda_handler(args, None)
            code = result['statusCode']
            headers = result['headers']
            body = result['body']
            # Lambda responses may carry base64-encoded binary or plain text
            encoded = result.get("isBase64Encoded", False)
            if encoded:
                body = b64decode(body)
            else:
                body = body.encode()
            headers['Content-Length'] = str(len(body))
            self.send_response(code)
            for name, value in headers.items():
                self.send_header(name, value)
            self.end_headers()
            self.wfile.write(body)
        else:
            # Anything outside LAMBDA_PATH is answered with a plain 404
            self.send_response(404)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            self.wfile.write(b'Not Found')

    def do_GET(self):
        # GET carries no body
        self._handle_request(None)

    def do_POST(self):
        # Read exactly Content-Length bytes of body before forwarding
        size = int(self.headers.get('Content-Length', '0'))
        body = self.rfile.read(size)
        self._handle_request(body)

    def do_OPTIONS(self):
        # OPTIONS is forwarded like a body-less request
        self._handle_request(None)
def main(argv):
    """Run the local lambda HTTP server.

    argv: process argument vector; no extra arguments are accepted.
    Returns 1 on usage error, otherwise serves forever.
    """
    if len(argv) != 1:
        # Bug fix: the usage message previously printed the literal
        # 'Usage: %s' without ever interpolating the program name.
        print('Usage: %s' % argv[0], file=sys.stderr)
        return 1
    server_address = ('127.0.0.1', PORT)
    print("Running on http://%s:%d%s" % (server_address[0], server_address[1], LAMBDA_PATH))
    server = HTTPServer(server_address, Handler)
    server.serve_forever()
if __name__ == '__main__':
    # When run as a script, exit with main()'s return code
    sys.exit(main(sys.argv))
| 30.370787 | 92 | 0.578986 |
f0af9754d60b3d12e96532ea5ec5f8b8a43a5831 | 34,138 | py | Python | src/ktool/dyld.py | KritantaDev/kdump | 2c0e24f7006ec33be9f19aaca250bda8aea99148 | [
"MIT"
] | 9 | 2021-07-15T18:08:15.000Z | 2021-07-28T07:25:10.000Z | src/ktool/dyld.py | KritantaDev/kdump | 2c0e24f7006ec33be9f19aaca250bda8aea99148 | [
"MIT"
] | null | null | null | src/ktool/dyld.py | KritantaDev/kdump | 2c0e24f7006ec33be9f19aaca250bda8aea99148 | [
"MIT"
] | null | null | null | #
# ktool | ktool
# dyld.py
#
# This file includes a lot of utilities, classes, and abstractions
# designed for replicating certain functionality within dyld.
#
# This file is part of ktool. ktool is free software that
# is made available under the MIT license. Consult the
# file "LICENSE" that is distributed together with this file
# for the exact licensing terms.
#
# Copyright (c) kat 2021.
#
import math
from collections import namedtuple
from enum import Enum
from typing import List, Union, Dict
from kmacho import (
MH_FLAGS,
MH_FILETYPE,
LOAD_COMMAND,
BINDING_OPCODE,
LOAD_COMMAND_MAP,
BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB,
BIND_SUBOPCODE_THREADED_APPLY, MH_MAGIC_64
)
from kmacho.structs import *
from kmacho.base import Constructable
from .macho import _VirtualMemoryMap, Segment, Slice
from .util import log, macho_is_malformed
class ImageHeader(Constructable):
    """
    This class represents the Mach-O Header

    It contains the basic header info along with all load commands within it.

    It doesn't handle complex abstraction logic, it simply loads in the
    load commands as their raw structs.
    """

    @classmethod
    def from_image(cls, macho_slice) -> 'ImageHeader':
        """Load the Mach-O header and its load commands from a slice.

        :param macho_slice: MachO Slice to read the header from
        :return: Populated ImageHeader
        """
        image_header = ImageHeader()

        offset = 0
        header: dyld_header = macho_slice.load_struct(offset, dyld_header)
        # A 64-bit magic means the header uses the wider layout; reload it
        # with the 64-bit struct.
        if header.magic == MH_MAGIC_64:
            header: dyld_header_64 = macho_slice.load_struct(offset, dyld_header_64)
            image_header.is64 = True

        raw = header.raw
        image_header.filetype = MH_FILETYPE(header.filetype)

        # Collect every flag bit set in the header
        for flag in MH_FLAGS:
            if header.flags & flag.value:
                image_header.flags.append(flag)

        offset += header.SIZE

        load_commands = []
        for _ in range(header.loadcnt):
            cmd = macho_slice.get_int_at(offset, 4)
            cmd_size = macho_slice.get_int_at(offset + 4, 4)
            cmd_raw = macho_slice.get_bytes_at(offset, cmd_size)
            try:
                load_cmd = Struct.create_with_bytes(LOAD_COMMAND_MAP[LOAD_COMMAND(cmd)], cmd_raw)
                load_cmd.off = offset
            except (ValueError, KeyError):
                # Unknown or unmapped load command; fall back to the
                # generic struct. (Bug fix: the two identical except
                # branches were merged.)
                load_cmd = macho_slice.load_struct(offset, unk_command)

            load_commands.append(load_cmd)
            raw += cmd_raw
            offset += load_cmd.cmdsize

        image_header.raw = raw
        image_header.dyld_header = header
        image_header.load_commands = load_commands
        return image_header

    @classmethod
    def from_values(cls, *args, **kwargs):
        # Not implemented: headers are only ever built from slices here.
        pass

    def __init__(self):
        self.is64 = False
        self.dyld_header = None
        self.filetype = MH_FILETYPE(0)
        # Bug fix: was annotated List[MH_FILETYPE]; the list actually
        # holds MH_FLAGS members (see from_image).
        self.flags: List[MH_FLAGS] = []
        self.load_commands = []
        self.raw = bytearray()

    def raw_bytes(self) -> bytes:
        """Return the raw header bytes followed by all load command bytes."""
        return self.raw
class Image:
"""
This class represents the Mach-O Binary as a whole.
It's the root object in the massive tree of information we're going to build up about the binary
This class on its own does not handle populating its fields.
The Dyld class set is responsible for loading in and processing the raw values to it.
"""
def __init__(self, macho_slice: Slice):
"""
Create a MachO image
:param macho_slice: MachO Slice being processed
:type macho_slice: MachO Slice
"""
self.slice: Slice = macho_slice
if self.slice:
self.macho_header: ImageHeader = ImageHeader.from_image(macho_slice=macho_slice)
self.linked_images: List[ExternalDylib] = []
self.name = "" # TODO: Remove this field soon.
self.base_name = "" # copy of self.name
self.install_name = ""
self.segments = {}
log.debug("Initializing VM Map")
self.vm = _VirtualMemoryMap(macho_slice)
self.info: Union[dyld_info_command, None] = None
self.dylib: Union[ExternalDylib, None] = None
self.uuid = None
self.platform: PlatformType = PlatformType.UNK
self.allowed_clients: List[str] = []
self.rpath: Union[str, None] = None
self.minos = os_version(0, 0, 0)
self.sdk_version = os_version(0, 0, 0)
self.imports: List[Symbol] = []
self.exports: List[Symbol] = []
self.symbols: Dict[int, Symbol] = {}
self.import_table: Dict[int, Symbol] = {}
self.export_table: Dict[int, Symbol] = {}
self.binding_table = None
self.weak_binding_table = None
self.lazy_binding_table = None
self.export_trie: Union[ExportTrie, None] = None
self.symbol_table: Union[SymbolTable, None] = None
self.struct_cache: Dict[int, Struct] = {}
def vm_check(self, address):
return self.vm.vm_check(address)
def get_int_at(self, offset: int, length: int, vm=False, section_name=None):
"""
Get a sequence of bytes (as an int) from a location
:param offset: Offset within the image
:param length: Amount of bytes to get
:param vm: Is `offset` a VM address
:param section_name: Section Name if vm==True (improves translation time slightly)
:return: `length` Bytes at `offset`
"""
if vm:
offset = self.vm.get_file_address(offset, section_name)
return self.slice.get_int_at(offset, length)
def get_bytes_at(self, offset: int, length: int, vm=False, section_name=None):
"""
Get a sequence of bytes from a location
:param offset: Offset within the image
:param length: Amount of bytes to get
:param vm: Is `offset` a VM address
:param section_name: Section Name if vm==True (improves translation time slightly)
:return: `length` Bytes at `offset`
"""
if vm:
offset = self.vm.get_file_address(offset, section_name)
return self.slice.get_bytes_at(offset, length)
def load_struct(self, address: int, struct_type, vm=False, section_name=None, endian="little"):
"""
Load a struct (struct_type_t) from a location and return the processed object
:param address: Address to load struct from
:param struct_type: type of struct (e.g. dyld_header)
:param vm: Is `address` a VM address?
:param section_name: if `vm==True`, the section name (slightly improves translation speed)
:param endian: Endianness of bytes to read.
:return: Loaded struct
"""
if address not in self.struct_cache:
if vm:
address = self.vm.get_file_address(address, section_name)
struct = self.slice.load_struct(address, struct_type, endian)
self.struct_cache[address] = struct
return struct
return self.struct_cache[address]
def get_str_at(self, address: int, count: int, vm=False, section_name=None):
"""
Get string with set length from location (to be used essentially only for loading segment names)
:param address: Address of string start
:param count: Length of string
:param vm: Is `address` a VM address?
:param section_name: if `vm==True`, the section name (unused here, really)
:return: The loaded string.
"""
if vm:
address = self.vm.get_file_address(address, section_name)
return self.slice.get_str_at(address, count)
def get_cstr_at(self, address: int, limit: int = 0, vm=False, section_name=None):
"""
Load a C style string from a location, stopping once a null byte is encountered.
:param address: Address to load string from
:param limit: Limit of the length of bytes, 0 = unlimited
:param vm: Is `address` a VM address?
:param section_name: if `vm==True`, the section name (vastly improves VM lookup time)
:return: The loaded C string
"""
if vm:
address = self.vm.get_file_address(address, section_name)
return self.slice.get_cstr_at(address, limit)
    def decode_uleb128(self, readHead: int):
        """
        Decode a uleb128 integer from a location.
        Thin delegation to the slice's decoder.
        :param readHead: Start location
        :return: (value, new read head) — a 2-tuple; call sites unpack it as
            `value, cursor = image.decode_uleb128(cursor)`
        """
        return self.slice.decode_uleb128(readHead)
class Dyld:
    """
    This class takes our initialized "Image" object, parses through the raw data behind it, and fills out its properties.
    """
    @classmethod
    def load(cls, macho_slice: Slice, load_symtab=True, load_imports=True, load_exports=True) -> Image:
        """
        Take a slice of a macho file and process it using the dyld functions
        :param load_exports: Load Exports
        :param load_imports: Load Imports
        :param load_symtab: Load Symbol Table
        :param macho_slice: Slice to load. If your image is not fat, that'll be MachOFile.slices[0]
        :type macho_slice: Slice
        :return: Processed image object
        :rtype: Image
        """
        log.info("Loading image")
        image = Image(macho_slice)
        log.info("Processing Load Commands")
        Dyld._parse_load_commands(image, load_symtab, load_imports, load_exports)
        log.info("Processing Image")
        Dyld._process_image(image)
        return image
    @classmethod
    def _parse_load_commands(cls, image: Image, load_symtab=True, load_imports=True, load_exports=True) -> None:
        """
        Walk every parsed load command and populate the corresponding Image
        fields: segments/VM map, binding tables, export trie, symbol table,
        UUID, platform/version info, rpath, and linked dylib references.
        """
        # noinspection PyUnusedLocal
        fixups = None
        log.info(f'registered {len(image.macho_header.load_commands)} Load Commands')
        for cmd in image.macho_header.load_commands:
            try:
                load_command = LOAD_COMMAND(cmd.cmd)
            except ValueError:
                # Unknown/unsupported load command type; skip it silently.
                continue
            if load_command == LOAD_COMMAND.SEGMENT_64 or load_command == LOAD_COMMAND.SEGMENT:
                log.debug("Loading Segment")
                segment = Segment(image, cmd)
                log.info(f'Loaded Segment {segment.name}')
                image.vm.add_segment(segment)
                image.segments[segment.name] = segment
                log.debug(f'Added {segment.name} to VM Map')
            elif load_command == LOAD_COMMAND.DYLD_INFO_ONLY:
                image.info = cmd
                if load_imports:
                    log.info("Loading Binding Info")
                    # Regular, weak, and lazy binding info each get their own table.
                    image.binding_table = BindingTable(image, cmd.bind_off, cmd.bind_size)
                    image.weak_binding_table = BindingTable(image, cmd.weak_bind_off, cmd.weak_bind_size)
                    image.lazy_binding_table = BindingTable(image, cmd.lazy_bind_off, cmd.lazy_bind_size)
                if load_exports:
                    log.info("Loading Export Trie")
                    image.export_trie = ExportTrie.from_image(image, cmd.export_off, cmd.export_size)
            elif load_command == LOAD_COMMAND.LC_DYLD_EXPORTS_TRIE:
                log.info("Loading Export Trie")
                image.export_trie = ExportTrie.from_image(image, cmd.dataoff, cmd.datasize)
            elif load_command == LOAD_COMMAND.LC_DYLD_CHAINED_FIXUPS:
                log.warning(
                    "image uses LC_DYLD_CHAINED_FIXUPS; This is not yet supported in ktool, off-image symbol resolution (superclasses, etc) will not work")
                pass
            elif load_command == LOAD_COMMAND.SYMTAB:
                if load_symtab:
                    log.info("Loading Symbol Table")
                    image.symbol_table = SymbolTable(image, cmd)
            elif load_command == LOAD_COMMAND.DYSYMTAB:
                # DYSYMTAB is currently unused; this assignment is a no-op placeholder.
                cmd = cmd
            elif load_command == LOAD_COMMAND.UUID:
                image.uuid = cmd.uuid.to_bytes(16, "little")
                log.info(f'image UUID: {image.uuid}')
            elif load_command == LOAD_COMMAND.SUB_CLIENT:
                # The client name C string trails the command struct.
                string = image.get_cstr_at(cmd.off + cmd.offset)
                image.allowed_clients.append(string)
                log.debug(f'Loaded Subclient "{string}"')
            elif load_command == LOAD_COMMAND.RPATH:
                string = image.get_cstr_at(cmd.off + cmd.path)
                image.rpath = string
                log.info(f'image Resource Path: {string}')
            elif load_command == LOAD_COMMAND.BUILD_VERSION:
                image.platform = PlatformType(cmd.platform)
                # minos/sdk are packed x.y.z version fields read byte-wise out
                # of the command. NOTE(review): offsets assume the standard
                # build_version_command layout — confirm against the struct defs.
                image.minos = os_version(x=image.get_int_at(cmd.off + 14, 2), y=image.get_int_at(cmd.off + 13, 1),
                                         z=image.get_int_at(cmd.off + 12, 1))
                image.sdk_version = os_version(x=image.get_int_at(cmd.off + 18, 2),
                                               y=image.get_int_at(cmd.off + 17, 1),
                                               z=image.get_int_at(cmd.off + 16, 1))
                log.info(f'Loaded platform {image.platform.name} | '
                         f'Minimum OS {image.minos.x}.{image.minos.y}'
                         f'.{image.minos.z} | SDK Version {image.sdk_version.x}'
                         f'.{image.sdk_version.y}.{image.sdk_version.z}')
            elif isinstance(cmd, version_min_command):
                # Only override this if it wasn't set by build_version
                if image.platform == PlatformType.UNK:
                    if load_command == LOAD_COMMAND.VERSION_MIN_MACOSX:
                        image.platform = PlatformType.MACOS
                    elif load_command == LOAD_COMMAND.VERSION_MIN_IPHONEOS:
                        image.platform = PlatformType.IOS
                    elif load_command == LOAD_COMMAND.VERSION_MIN_TVOS:
                        image.platform = PlatformType.TVOS
                    elif load_command == LOAD_COMMAND.VERSION_MIN_WATCHOS:
                        image.platform = PlatformType.WATCHOS
                image.minos = os_version(x=image.get_int_at(cmd.off + 10, 2),
                                         y=image.get_int_at(cmd.off + 9, 1),
                                         z=image.get_int_at(cmd.off + 8, 1))
            elif load_command == LOAD_COMMAND.ID_DYLIB:
                # LC_ID_DYLIB describes this image itself (its install name).
                image.dylib = ExternalDylib(image, cmd)
                log.info(f'Loaded local dylib_command with install_name {image.dylib.install_name}')
            elif isinstance(cmd, dylib_command):
                # Any other dylib_command variant is a linked dependency.
                # noinspection PyTypeChecker
                external_dylib = ExternalDylib(image, cmd)
                image.linked_images.append(external_dylib)
                log.info(f'Loaded linked dylib_command with install name {external_dylib.install_name}')
    @staticmethod
    def _process_image(image: Image) -> None:
        """
        Once all load commands have been processed, process the results.
        This is mainly for things which need to be done once *all* lcs have been processed.
        :param image:
        :return:
        """
        if image.dylib is not None:
            # The image identified itself via LC_ID_DYLIB: derive the names
            # from its install name.
            image.name = image.dylib.install_name.split('/')[-1]
            image.base_name = image.dylib.install_name.split('/')[-1]
            image.install_name = image.dylib.install_name
        else:
            image.name = ""
            image.base_name = image.slice.macho_file.filename
            image.install_name = ""
        if image.export_trie:
            for symbol in image.export_trie.symbols:
                image.exports.append(symbol)
                image.export_table[symbol.address] = symbol
        if image.binding_table:
            # Tag each imported symbol with how it is bound (regular/weak/lazy).
            for symbol in image.binding_table.symbol_table:
                symbol.attr = ''
                image.imports.append(symbol)
                image.import_table[symbol.address] = symbol
            for symbol in image.weak_binding_table.symbol_table:
                symbol.attr = 'Weak'
                image.imports.append(symbol)
                image.import_table[symbol.address] = symbol
            for symbol in image.lazy_binding_table.symbol_table:
                symbol.attr = 'Lazy'
                image.imports.append(symbol)
                image.import_table[symbol.address] = symbol
        if image.symbol_table:
            for symbol in image.symbol_table.table:
                image.symbols[symbol.address] = symbol
class LD64:
    """
    Load-command editing helpers: insert or remove load commands in a mapped
    image, patching both the raw slice bytes and the in-memory header state.
    """
    @classmethod
    def insert_load_cmd(cls, image: Image, lc, fields, index=-1):
        """
        Insert a fixed-size load command.
        :param image: Image to modify
        :param lc: LOAD_COMMAND enum member identifying the command type
        :param fields: struct field values following (cmd, cmdsize)
        :param index: position in the load command list; -1 appends at the end
        """
        lc_type = LOAD_COMMAND_MAP[lc]
        load_cmd = Struct.create_with_values(lc_type, [lc.value, lc_type.SIZE] + fields)
        off = dyld_header.SIZE
        off += image.macho_header.dyld_header.loadsize
        raw = load_cmd.raw
        size = len(load_cmd.raw)
        if index != -1:
            # Insert before `index`: shift the remaining commands' bytes down.
            b_load_cmd = image.macho_header.load_commands[index - 1]
            off = b_load_cmd.off + b_load_cmd.cmdsize
            after_bytes = image.macho_header.raw_bytes()[off:image.macho_header.dyld_header.loadsize + 32]
            image.slice.patch(off, raw)
            image.slice.patch(off + size, after_bytes)
            # BUGFIX: keep the in-memory list in sync with the on-disk patch
            # (mirrors insert_load_cmd_with_str); previously the command was
            # only recorded in the index == -1 branch.
            image.macho_header.load_commands.insert(index, load_cmd)
        else:
            image.slice.patch(off, raw)
            image.macho_header.load_commands.append(load_cmd)
        # Update the mach header's command count/size and write it back.
        image.macho_header.dyld_header.loadcnt += 1
        image.macho_header.dyld_header.loadsize += size
        image.slice.patch(image.macho_header.dyld_header.off, image.macho_header.dyld_header.raw)
    @classmethod
    def insert_load_cmd_with_str(cls, image: Image, lc, fields, suffix, index=-1):
        """
        Insert a load command followed by a trailing C string (e.g. an rpath
        or dylib install name), padding the command to an 8-byte boundary.
        :param image: Image to modify
        :param lc: LOAD_COMMAND enum member identifying the command type
        :param fields: struct field values following (cmd, cmdsize)
        :param suffix: string appended after the struct (NUL-terminated)
        :param index: position in the load command list; -1 appends at the end
        """
        lc_type = LOAD_COMMAND_MAP[lc]
        load_cmd = Struct.create_with_values(lc_type, [lc.value, lc_type.SIZE] + fields)
        log.debug(f'Fabricated Load Command {str(load_cmd)}')
        encoded = suffix.encode('utf-8') + b'\x00'
        # cmdsize covers struct + string, rounded up to an 8-byte boundary.
        cmd_size = lc_type.SIZE
        cmd_size += len(encoded)
        cmd_size = 0x8 * math.ceil(cmd_size / 0x8)
        log.debug_tm(f'Computed cmd size (w/ pad) Size of {cmd_size}')
        load_cmd.cmdsize = cmd_size
        off = dyld_header.SIZE
        off += image.macho_header.dyld_header.loadsize
        raw = load_cmd.raw + encoded + (b'\x00' * (cmd_size - (lc_type.SIZE + len(encoded))))
        log.debug_tm(f'Padding Size {(cmd_size - (lc_type.SIZE + len(encoded)))}')
        size = len(raw)
        if index != -1:
            b_load_cmd = image.macho_header.load_commands[index - 1]
            off = b_load_cmd.off + b_load_cmd.cmdsize
            after_bytes = image.macho_header.raw_bytes()[off:image.macho_header.dyld_header.loadsize + 32]
            image.slice.patch(off, raw)
            image.slice.patch(off + size, after_bytes)
            image.macho_header.load_commands.insert(index, load_cmd)
        else:
            image.slice.patch(off, raw)
            image.macho_header.load_commands.append(load_cmd)
        image.macho_header.dyld_header.loadcnt += 1
        # BUGFIX: inserting a command grows the load-command region; this was
        # previously `-= size`, which corrupted loadsize (its sibling
        # insert_load_cmd correctly increments).
        image.macho_header.dyld_header.loadsize += size
        image.slice.patch(image.macho_header.dyld_header.off, image.macho_header.dyld_header.raw)
    @classmethod
    def remove_load_command(cls, image: Image, index):
        """
        Remove the load command at `index`, shifting the following commands up
        and zero-filling the vacated tail bytes.
        """
        b_load_cmd = image.macho_header.load_commands.pop(index)
        off = b_load_cmd.off + b_load_cmd.cmdsize
        after_bytes = image.macho_header.raw_bytes()[off:image.macho_header.dyld_header.loadsize + 32]
        image.slice.patch(b_load_cmd.off, after_bytes)
        # NOTE(review): the zero-fill offset assumes the load-command region
        # starts 32 bytes into the file (64-bit mach header size) — confirm.
        image.slice.patch(image.macho_header.dyld_header.loadsize + 32 - b_load_cmd.cmdsize, b'\x00' * b_load_cmd.cmdsize)
        image.macho_header.dyld_header.loadcnt -= 1
        image.macho_header.dyld_header.loadsize -= b_load_cmd.cmdsize
        image.slice.patch(image.macho_header.dyld_header.off, image.macho_header.dyld_header.raw)
class ExternalDylib:
    """
    Wraps a dylib_command: either the image's own LC_ID_DYLIB entry or a
    reference to a linked external library.
    """
    def __init__(self, source_image: Image, cmd):
        self.cmd = cmd
        self.source_image = source_image
        self.install_name = self._get_name(cmd)
        # LC_LOAD_WEAK_DYLIB == 0x18 | LC_REQ_DYLD (0x80000000)
        self.weak = cmd.cmd == (0x18 | 0x80000000)
        # LC_ID_DYLIB (0xD) describes the image itself rather than a dependency.
        self.local = cmd.cmd == 0xD
    def _get_name(self, cmd) -> str:
        """Read the install-name C string that trails the dylib_command struct."""
        name_offset = cmd.off + dylib_command.SIZE
        return self.source_image.get_cstr_at(name_offset)
# Packed x.y.z OS version triple (e.g. minimum OS / SDK version).
os_version = namedtuple("os_version", "x y z")
class PlatformType(Enum):
    """
    Target platform of an image. Values 1-10 mirror the Mach-O PLATFORM_*
    constants from <mach-o/loader.h>; UNK (64) is an internal sentinel
    meaning "not determined yet" (used as the Image default).
    """
    MACOS = 1
    IOS = 2
    TVOS = 3
    WATCHOS = 4
    BRIDGE_OS = 5
    MAC_CATALYST = 6
    IOS_SIMULATOR = 7
    TVOS_SIMULATOR = 8
    WATCHOS_SIMULATOR = 9
    DRIVER_KIT = 10
    UNK = 64
class ToolType(Enum):
    """
    Build tool identifiers; values mirror the Mach-O TOOL_* constants used
    in build_version_command tool entries.
    """
    CLANG = 1
    SWIFT = 2
    LD = 3
class SymbolType(Enum):
    """
    High-level category of a Symbol, decoded from its mangled name in
    Symbol.from_values (ObjC class/metaclass/ivar prefixes, else FUNC).
    """
    CLASS = 0
    METACLASS = 1
    IVAR = 2
    FUNC = 3
    UNK = 4
class Symbol(Constructable):
    """
    This class can represent several types of symbols.
    A Symbol can be built from a symbol-table nlist entry (`from_image`) or
    fabricated from a name/address pair (`from_values`).
    """
    @classmethod
    def from_image(cls, image, cmd, entry):
        """
        Build a Symbol from a symtab_command and one of its nlist entries.
        :param image: Image the entry was loaded from
        :param cmd: symtab_command (supplies the string table offset)
        :param entry: nlist entry (str_index / value / type fields)
        """
        fullname = image.get_cstr_at(entry.str_index + cmd.stroff)
        addr = entry.value
        symbol = cls.from_values(fullname, addr)
        # nlist n_type masks (see <mach-o/nlist.h>).
        N_STAB = 0xe0
        N_PEXT = 0x10
        N_TYPE = 0x0e
        N_EXT = 0x01
        type_masked = N_TYPE & entry.type
        # BUGFIX: N_TYPE holds one of a set of enumerated *values*, not
        # independent bit flags, so compare with equality. The previous
        # bitwise `&` test mis-classified overlapping values (e.g. N_SECT
        # (0xe) also "matched" N_ABS/N_PBUD/N_INDR) and could never detect
        # N_UNDF (0x0).
        for name, flag in {'N_UNDF': 0x0, 'N_ABS': 0x2, 'N_SECT': 0xe, 'N_PBUD': 0xc, 'N_INDR': 0xa}.items():
            if type_masked == flag:
                symbol.types.append(name)
        if entry.type & N_EXT:
            symbol.external = True
        return symbol
    @classmethod
    def from_values(cls, fullname, value, external=False, ordinal=0):
        """
        Build a Symbol from a (possibly ObjC-mangled) name and an address.
        :param fullname: full symbol name, e.g. '_OBJC_CLASS_$_NSString'
        :param value: symbol address
        :param external: whether the symbol is external to the image
        :param ordinal: library ordinal (or name) the symbol binds against
        """
        if '_$_' in fullname:
            if fullname.startswith('_OBJC_CLASS_$'):
                dec_type = SymbolType.CLASS
            elif fullname.startswith('_OBJC_METACLASS_$'):
                dec_type = SymbolType.METACLASS
            elif fullname.startswith('_OBJC_IVAR_$'):
                dec_type = SymbolType.IVAR
            else:
                dec_type = SymbolType.UNK
            # Strip the '..._$' prefix, keeping the human-readable tail.
            name = fullname.split('$')[1]
        else:
            name = fullname
            dec_type = SymbolType.FUNC
        return cls(fullname, name=name, dec_type=dec_type, external=external, value=value, ordinal=ordinal)
    def raw_bytes(self):
        # Serialization is not implemented for symbols.
        pass
    def __init__(self, fullname=None, name=None, dec_type=None, external=False, value=0, ordinal=0):
        self.fullname = fullname
        self.name = name
        self.dec_type = dec_type
        self.address = value
        # Raw nlist entry, if any (set externally).
        self.entry = None
        self.ordinal = ordinal
        # N_TYPE classification names filled in by from_image.
        self.types = []
        self.external = external
        # Binding attribute ('', 'Weak', 'Lazy') set by Dyld._process_image.
        self.attr = None
class SymbolTable:
    """
    Representation of the symbol table declared by a SYMTAB load command.
    .table holds every parsed Symbol; .ext holds the subset whose raw nlist
    type byte equals 0xf.
    This class is incomplete.
    """
    def __init__(self, image: Image, cmd: symtab_command):
        self.image: Image = image
        self.cmd: symtab_command = cmd
        self.ext: List[Symbol] = []
        self.table: List[Symbol] = self._load_symbol_table()
    def _load_symbol_table(self) -> List[Symbol]:
        """Parse every nlist entry, then wrap each one in a Symbol."""
        # 64-bit and 32-bit images use differently sized nlist structs.
        entry_type = symtab_entry if self.image.macho_header.is64 else symtab_entry_32
        entries = []
        for index in range(self.cmd.nsyms):
            entry = self.image.load_struct(self.cmd.symoff + entry_type.SIZE * index, entry_type)
            entries.append(entry)
            log.debug_tm(str(entry))
        table = []
        for entry in entries:
            symbol = Symbol.from_image(self.image, self.cmd, entry)
            log.debug_tm(f'Symbol Table: Loaded symbol:{symbol.name} ordinal:{symbol.ordinal} type:{symbol.dec_type}')
            table.append(symbol)
            if entry.type == 0xf:
                self.ext.append(symbol)
        return table
# One node of the export trie: accumulated symbol text, offset, and flags.
export_node = namedtuple("export_node", "text offset flags")
class ExportTrie(Constructable):
    """
    Parsed form of the dyld export trie: a prefix tree mapping exported
    symbol names to their offsets and flags.
    """
    @classmethod
    def from_image(cls, image: Image, export_start: int, export_size: int) -> 'ExportTrie':
        """
        Parse the export trie located at `export_start` in `image`.
        :param image: Image containing the trie
        :param export_start: File offset of the trie
        :param export_size: Size of the trie in bytes
        :return: Populated ExportTrie
        """
        trie = ExportTrie()
        endpoint = export_start + export_size
        nodes = ExportTrie.read_node(image, export_start, '', export_start, endpoint)
        symbols = []
        for node in nodes:
            # Only nodes that accumulated text represent actual exports.
            if node.text:
                symbols.append(Symbol.from_values(node.text, node.offset, False))
        trie.nodes = nodes
        trie.symbols = symbols
        trie.raw = image.get_bytes_at(export_start, export_size)
        return trie
    @classmethod
    def from_values(cls, *args, **kwargs):
        # Constructing a trie from raw values is not implemented.
        pass
    def raw_bytes(self):
        """Return the raw bytes the trie was parsed from."""
        return self.raw
    def __init__(self):
        self.raw = bytearray()  # raw trie bytes (filled by from_image)
        self.nodes: List[export_node] = []
        self.symbols: List[Symbol] = []
    @classmethod
    def read_node(cls, image: Image, trie_start: int, string: str, cursor: int, endpoint: int) -> List[export_node]:
        """
        Recursively read the trie node at `cursor`, carrying the accumulated
        name prefix in `string`; returns all nodes reachable from it.
        Aborts via macho_is_malformed() if the cursor walks past the trie.
        """
        if cursor > endpoint:
            log.error("Node offset greater than size of export trie")
            macho_is_malformed()
        start = cursor
        byte = image.get_int_at(cursor, 1)
        results = []
        log.debug_tm(f'@ {hex(start)} node: {hex(byte)} current_symbol: {string}')
        if byte == 0:
            # Terminal-info size of 0: branch node. Read the edge count and
            # recurse into each (edge string, child offset) pair.
            cursor += 1
            branches = image.get_int_at(cursor, 1)
            log.debug_tm(f'BRAN {branches}')
            for i in range(0, branches):
                if i == 0:
                    cursor += 1
                proc_str = image.get_cstr_at(cursor)
                cursor += len(proc_str) + 1
                offset, cursor = image.decode_uleb128(cursor)
                log.debug_tm(f'({i}) string: {string + proc_str} next_node: {hex(trie_start + offset)}')
                results += ExportTrie.read_node(image, trie_start, string + proc_str, trie_start + offset, endpoint)
        else:
            # Non-zero terminal size: this node carries export info
            # (flags + offset) for the accumulated symbol name.
            log.debug_tm(f'TERM: 0')
            size, cursor = image.decode_uleb128(cursor)
            flags = image.get_int_at(cursor, 1)
            cursor += 1
            offset, cursor = image.decode_uleb128(cursor)
            results.append(export_node(string, offset, flags))
        return results
# Resolved bind action: target VM address, source library, symbol name.
action = namedtuple("action", "vmaddr libname item")
# Raw bind record accumulated while interpreting the binding opcode stream.
record = namedtuple(
    "record",
    "off seg_index seg_offset lib_ordinal type flags name addend special_dylib",
)
class BindingTable:
    """
    The binding table contains a ton of information related to the binding info in the image
    .lookup_table - Contains a map of address -> Symbol declarations which should be used for processing off-image
    symbol decorations
    .symbol_table - Contains a full list of symbols declared in the binding info. Avoid iterating through this for
    speed purposes.
    .actions - contains a list of, you guessed it, actions.
    .import_stack - contains a fairly raw unprocessed list of binding info commands
    """
    def __init__(self, image: Image, table_start: int, table_size: int):
        """
        Parse the binding info located at `table_start` in `image`.
        :param image: image to be processed
        :type image: Image
        :param table_start: file offset of the binding opcode stream
        :param table_size: size of the opcode stream in bytes
        """
        self.image = image
        # Order matters: actions derive from the import stack, and
        # _load_symbol_table fills lookup_table as it builds symbols.
        self.import_stack = self._load_binding_info(table_start, table_size)
        self.actions = self._create_action_list()
        self.lookup_table = {}
        self.link_table = {}
        self.symbol_table = self._load_symbol_table()
    def _load_symbol_table(self) -> List[Symbol]:
        """Convert actions into Symbols, registering each in lookup_table by VM address."""
        table = []
        for act in self.actions:
            if act.item:
                sym = Symbol.from_values(act.item, act.vmaddr, external=True, ordinal=act.libname)
                table.append(sym)
                self.lookup_table[act.vmaddr] = sym
        return table
    def _create_action_list(self) -> List[action]:
        """Resolve each raw bind record into (vmaddr, source library, symbol name)."""
        actions = []
        for bind_command in self.import_stack:
            segment = list(self.image.segments.values())[bind_command.seg_index]
            vm_address = segment.vm_address + bind_command.seg_offset
            try:
                # Library ordinals are 1-based indexes into linked_images.
                lib = self.image.linked_images[bind_command.lib_ordinal - 1].install_name
            except IndexError:
                # Special/out-of-range ordinals (self, flat lookup, ...):
                # fall back to the raw ordinal as a string.
                lib = str(bind_command.lib_ordinal)
            item = bind_command.name
            actions.append(action(vm_address & 0xFFFFFFFFF, lib, item))
        return actions
    def _load_binding_info(self, table_start: int, table_size: int) -> List[record]:
        """
        Interpret the dyld binding opcode stream (BIND_OPCODE_*) and return
        the accumulated bind records.
        """
        read_address = table_start
        import_stack = []
        threaded_stack = []
        uses_threaded_bind = False
        while True:
            # Equivalent to: read_address >= table_start + table_size
            if read_address - table_size >= table_start:
                break
            # Bind state-machine registers, reset for each DONE-terminated run.
            seg_index = 0x0
            seg_offset = 0x0
            lib_ordinal = 0x0
            btype = 0x0
            flags = 0x0
            name = ""
            addend = 0x0
            special_dylib = 0x0
            while True:
                # There are 0xc opcodes total
                # Bitmask opcode byte with 0xF0 to get opcode, 0xF to get value
                binding_opcode = self.image.get_int_at(read_address, 1) & 0xF0
                value = self.image.get_int_at(read_address, 1) & 0x0F
                log.debug_tm(f'{BINDING_OPCODE(binding_opcode).name}: {hex(value)}')
                cmd_start_addr = read_address
                read_address += 1
                # NOTE(review): these comparisons test an int against
                # BINDING_OPCODE members — verify BINDING_OPCODE is an
                # IntEnum (or otherwise compares equal to ints).
                if binding_opcode == BINDING_OPCODE.THREADED:
                    if value == BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB:
                        a_table_size, read_address = self.image.decode_uleb128(read_address)
                        uses_threaded_bind = True
                    elif value == BIND_SUBOPCODE_THREADED_APPLY:
                        pass
                if binding_opcode == BINDING_OPCODE.DONE:
                    import_stack.append(
                        record(cmd_start_addr, seg_index, seg_offset, lib_ordinal, btype, flags, name, addend,
                               special_dylib))
                    break
                elif binding_opcode == BINDING_OPCODE.SET_DYLIB_ORDINAL_IMM:
                    lib_ordinal = value
                elif binding_opcode == BINDING_OPCODE.SET_DYLIB_ORDINAL_ULEB:
                    lib_ordinal, read_address = self.image.decode_uleb128(read_address)
                elif binding_opcode == BINDING_OPCODE.SET_DYLIB_SPECIAL_IMM:
                    special_dylib = 0x1
                    lib_ordinal = value
                elif binding_opcode == BINDING_OPCODE.SET_SYMBOL_TRAILING_FLAGS_IMM:
                    # Symbol name is a NUL-terminated C string following the opcode.
                    flags = value
                    name = self.image.get_cstr_at(read_address)
                    read_address += len(name) + 1
                elif binding_opcode == BINDING_OPCODE.SET_TYPE_IMM:
                    btype = value
                elif binding_opcode == BINDING_OPCODE.SET_ADDEND_SLEB:
                    # NOTE(review): the addend is decoded with the uleb decoder
                    # even though the opcode is SLEB — negative addends would
                    # decode incorrectly; confirm against dyld's behavior.
                    addend, read_address = self.image.decode_uleb128(read_address)
                elif binding_opcode == BINDING_OPCODE.SET_SEGMENT_AND_OFFSET_ULEB:
                    seg_index = value
                    seg_offset, read_address = self.image.decode_uleb128(read_address)
                elif binding_opcode == BINDING_OPCODE.ADD_ADDR_ULEB:
                    o, read_address = self.image.decode_uleb128(read_address)
                    seg_offset += o
                elif binding_opcode == BINDING_OPCODE.DO_BIND_ADD_ADDR_ULEB:
                    import_stack.append(
                        record(cmd_start_addr, seg_index, seg_offset, lib_ordinal, btype, flags, name, addend,
                               special_dylib))
                    # Each bind implicitly advances past the bound pointer (8 bytes).
                    seg_offset += 8
                    o, read_address = self.image.decode_uleb128(read_address)
                    seg_offset += o
                elif binding_opcode == BINDING_OPCODE.DO_BIND_ADD_ADDR_IMM_SCALED:
                    import_stack.append(
                        record(cmd_start_addr, seg_index, seg_offset, lib_ordinal, btype, flags, name, addend,
                               special_dylib))
                    seg_offset = seg_offset + (value * 8) + 8
                elif binding_opcode == BINDING_OPCODE.DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
                    count, read_address = self.image.decode_uleb128(read_address)
                    skip, read_address = self.image.decode_uleb128(read_address)
                    for i in range(0, count):
                        import_stack.append(
                            record(cmd_start_addr, seg_index, seg_offset, lib_ordinal, btype, flags, name, addend,
                                   special_dylib))
                        seg_offset += skip + 8
                elif binding_opcode == BINDING_OPCODE.DO_BIND:
                    if not uses_threaded_bind:
                        import_stack.append(
                            record(cmd_start_addr, seg_index, seg_offset, lib_ordinal, btype, flags, name, addend,
                                   special_dylib))
                        seg_offset += 8
                    else:
                        # Threaded binds are recorded separately; the pointer
                        # chain is applied later by THREADED_APPLY.
                        threaded_stack.append(
                            record(cmd_start_addr, seg_index, seg_offset, lib_ordinal, btype, flags, name, addend,
                                   special_dylib))
                        seg_offset += 8
        return import_stack
| 37.805094 | 156 | 0.586971 |
c399e72d971d861feeab109c2bba71a731c2c043 | 15,811 | py | Python | pymatgen/analysis/ferroelectricity/polarization.py | mailhexu/pymatgen | 70da55dd860771eb9d38c306dbcd3f6b074b7a54 | [
"MIT"
] | 18 | 2019-06-15T18:08:21.000Z | 2022-01-30T05:01:29.000Z | ComRISB/pyextern/pymatgen/pymatgen/analysis/ferroelectricity/polarization.py | comscope/Comsuite | b80ca9f34c519757d337487c489fb655f7598cc2 | [
"BSD-3-Clause"
] | null | null | null | ComRISB/pyextern/pymatgen/pymatgen/analysis/ferroelectricity/polarization.py | comscope/Comsuite | b80ca9f34c519757d337487c489fb655f7598cc2 | [
"BSD-3-Clause"
] | 11 | 2019-06-05T02:57:55.000Z | 2021-12-29T02:54:25.000Z | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals
from __future__ import absolute_import
import os
from math import *
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Outcar
from pymatgen.core.sites import PeriodicSite
from pymatgen.io.cif import CifWriter
from pymatgen.core.lattice import Lattice
import numpy as np
import yaml
"""
This module provides the classes needed to analyze the change in polarization
from a nonpolar reference phase to a polar ferroelectric phase.
"""
__author__ = "Tess Smidt"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "1.0"
__email__ = "tsmidt@berkeley.edu"
__status__ = "Development"
__date__ = "April 15, 2017"
"""
This module contains classes useful for analyzing ferroelectric candidates.
The Polarization class can recover the spontaneous polarization using
multiple calculations along a nonpolar to polar ferroelectric distortion.
The EnergyTrend class is useful for assessing the trend in energy across
the distortion.
See Nicola Spaldin's "A beginner's guide to the modern theory of polarization"
(https://arxiv.org/abs/1202.1831) for an introduction to crystal polarization.
We recommend using our calc_ionic function for calculating the ionic
polarization rather than the values from OUTCAR.
We find that the ionic dipole moment reported in OUTCAR differ from
the naive calculation of \\sum_i Z_i r_i where i is the index of the
atom, Z_i is the ZVAL from the pseudopotential file, and r is the distance
in Angstroms along the lattice vectors.
Compare calc_ionic to VASP dipol.F. SUBROUTINE POINT_CHARGE_DIPOL.
We are able to recover a smooth same branch polarization more frequently
using the naive calculation in calc_ionic than using the ionic dipole
moment reported in the OUTCAR.
Some definitions of terms used in the comments below:
A polar structure belongs to a polar space group. A polar space group has
one of the 10 polar point groups:
(1, 2, m, mm2, 4, 4mm, 3, 3m, 6, 6m)
Being nonpolar is not equivalent to being centrosymmetric (having inversion
symmetry). For example, any space group with point group 222 is nonpolar but
not centrosymmetric.
By symmetry the polarization of a nonpolar material modulo the quantum
of polarization can only be zero or 1/2. We use a nonpolar structure to help
determine the spontaneous polarization because it serves as a reference point.
"""
def zval_dict_from_potcar(potcar):
"""
Creates zval_dictionary for calculating the ionic polarization from
Potcar object
potcar: Potcar object
"""
zval_dict = {}
for p in potcar:
zval_dict.update({p.element: p.ZVAL})
return zval_dict
def calc_ionic(site, structure, zval):
"""
Calculate the ionic dipole moment using ZVAL from pseudopotential
site: PeriodicSite
structure: Structure
zval: Charge value for ion (ZVAL for VASP pseudopotential)
Returns polarization in electron Angstroms.
"""
norms = structure.lattice.lengths_and_angles[0]
return np.multiply(norms, -site.frac_coords * zval)
def get_total_ionic_dipole(structure, zval_dict):
"""
Get the total ionic dipole moment for a structure.
structure: pymatgen Structure
zval_dict: specie, zval dictionary pairs
center (np.array with shape [3,1]) : dipole center used by VASP
tiny (float) : tolerance for determining boundary of calculation.
"""
tot_ionic = []
for site in structure:
zval = zval_dict[str(site.specie)]
tot_ionic.append(calc_ionic(site, structure, zval))
return np.sum(tot_ionic, axis=0)
class PolarizationLattice(Structure):
def get_nearest_site(self, coords, site, r=None):
"""
Given coords and a site, find closet site to coords.
Args:
coords (3x1 array): cartesian coords of center of sphere
site: site to find closest to coords
r: radius of sphere. Defaults to diagonal of unit cell
Returns:
Closest site and distance.
"""
index = self.index(site)
if r is None:
r = np.linalg.norm(np.sum(np.matrix(self.lattice.matrix), axis=0))
ns = self.get_sites_in_sphere(coords, r, include_index=True)
# Get sites with identical index to site
ns = [n for n in ns if n[2] == index]
# Sort by distance to coords
ns.sort(key=lambda x: x[1])
# Return PeriodicSite and distance of closest image
return ns[0][0:2]
class Polarization(object):
"""
Class for recovering the same branch polarization for a set of
polarization calculations along the nonpolar - polar distortion
path of a ferroelectric.
p_elecs, p_ions, and structures lists should be given in order
of nonpolar to polar! For example, the structures returned from:
nonpolar.interpolate(polar,interpolate_lattices=True)
if nonpolar is the nonpolar Structure and polar is the polar structure.
It is assumed that the electronic and ionic dipole moment values
are given in electron Angstroms along the three lattice directions
(a,b,c).
"""
def __init__(self, p_elecs, p_ions, structures):
if len(p_elecs) != len(p_ions) or len(p_elecs) != len(structures):
raise ValueError(
"The number of electronic polarization and ionic polarization values must be equal.")
self.p_elecs = np.matrix(p_elecs)
self.p_ions = np.matrix(p_ions)
self.structures = structures
@classmethod
def from_outcars_and_structures(cls, outcars, structures,
calc_ionic_from_zval=False):
"""
Create Polarization object from list of Outcars and Structures in order
of nonpolar to polar.
Note, we recommend calculating the ionic dipole moment using calc_ionic
than using the values in Outcar (see module comments). To do this set
calc_ionic_from_zval = True
"""
p_elecs = []
p_ions = []
for i, o in enumerate(outcars):
p_elecs.append(o.p_elec)
if calc_ionic_from_zval:
p_ions.append(
get_total_ionic_dipole(structures[i], o.zval_dict))
else:
p_ions.append(o.p_ion)
return cls(p_elecs, p_ions, structures)
def get_pelecs_and_pions(self, convert_to_muC_per_cm2=False):
"""
Get the electronic and ionic dipole moments / polarizations.
convert_to_muC_per_cm2: Convert from electron * Angstroms to microCoulomb
per centimeter**2
"""
if not convert_to_muC_per_cm2:
return self.p_elecs, self.p_ions
if convert_to_muC_per_cm2:
p_elecs = np.matrix(self.p_elecs).T
p_ions = np.matrix(self.p_ions).T
volumes = [s.lattice.volume for s in self.structures]
e_to_muC = -1.6021766e-13
cm2_to_A2 = 1e16
units = 1.0 / np.matrix(volumes)
units *= e_to_muC * cm2_to_A2
p_elecs = np.multiply(units, p_elecs)
p_ions = np.multiply(units, p_ions)
p_elecs, p_ions = p_elecs.T, p_ions.T
return p_elecs, p_ions
def get_same_branch_polarization_data(self, convert_to_muC_per_cm2=False):
"""
Get same branch dipole moment (convert_to_muC_per_cm2=False)
or polarization for given polarization data (convert_to_muC_per_cm2=True).
Polarization is a lattice vector, meaning it is only defined modulo the
quantum of polarization:
P = P_0 + \\sum_i \\frac{n_i e R_i}{\\Omega}
where n_i is an integer, e is the charge of the electron in microCoulombs,
R_i is a lattice vector, and \\Omega is the unit cell volume in cm**3
(giving polarization units of microCoulomb per centimeter**2).
The quantum of the dipole moment in electron Angstroms (as given by VASP) is:
\\sum_i n_i e R_i
where e, the electron charge, is 1 and R_i is a lattice vector, and n_i is an integer.
Given N polarization calculations in order from nonpolar to polar, this algorithm
minimizes the distance between adjacent polarization images. To do this, it
constructs a polarization lattice for each polarization calculation using the
pymatgen.core.structure class and calls the get_nearest_site method to find the
image of a given polarization lattice vector that is closest to the previous polarization
lattice vector image.
convert_to_muC_per_cm2: convert polarization from electron * Angstroms to
microCoulomb per centimeter**2
"""
p_elec, p_ion = self.get_pelecs_and_pions()
p_tot = p_elec + p_ion
p_tot = np.matrix(p_tot)
lattices = [s.lattice for s in self.structures]
volumes = np.matrix([s.lattice.volume for s in self.structures])
L = len(p_elec)
# convert polarizations and lattice lengths prior to adjustment
if convert_to_muC_per_cm2:
e_to_muC = -1.6021766e-13
cm2_to_A2 = 1e16
units = 1.0 / np.matrix(volumes)
units *= e_to_muC * cm2_to_A2
# Convert the total polarization
p_tot = np.multiply(units.T, p_tot)
# adjust lattices
for i in range(L):
lattice = lattices[i]
l, a = lattice.lengths_and_angles
lattices[i] = Lattice.from_lengths_and_angles(
np.array(l) * units.A1[i], a)
d_structs = []
sites = []
for i in range(L):
l = lattices[i]
frac_coord = np.divide(np.matrix(p_tot[i]),
np.matrix([l.a, l.b, l.c]))
d = PolarizationLattice(l, ["C"], [np.matrix(frac_coord).A1])
d_structs.append(d)
site = d[0]
if i == 0:
# Adjust nonpolar polarization to be closest to zero.
# This is compatible with both a polarization of zero or a half quantum.
prev_site = [0, 0, 0]
else:
prev_site = sites[-1].coords
new_site = d.get_nearest_site(prev_site, site)
sites.append(new_site[0])
adjust_pol = []
for s, d in zip(sites, d_structs):
l = d.lattice
adjust_pol.append(
np.multiply(s.frac_coords, np.matrix([l.a, l.b, l.c])).A1)
adjust_pol = np.matrix(adjust_pol)
return adjust_pol
def get_lattice_quanta(self, convert_to_muC_per_cm2=True):
"""
Returns the dipole / polarization quanta along a, b, and c for
all structures.
"""
lattices = [s.lattice for s in self.structures]
volumes = np.matrix([s.lattice.volume for s in self.structures])
L = len(self.structures)
# convert polarizations and lattice lengths prior to adjustment
if convert_to_muC_per_cm2:
e_to_muC = -1.6021766e-13
cm2_to_A2 = 1e16
units = 1.0 / np.matrix(volumes)
units *= e_to_muC * cm2_to_A2
# adjust lattices
for i in range(L):
lattice = lattices[i]
l, a = lattice.lengths_and_angles
lattices[i] = Lattice.from_lengths_and_angles(
np.array(l) * units.A1[i], a)
quanta = np.matrix(
[np.array(l.lengths_and_angles[0]) for l in lattices])
return quanta
def get_polarization_change(self):
"""
Get difference between nonpolar and polar same branch polarization.
"""
tot = self.get_same_branch_polarization_data(
convert_to_muC_per_cm2=True)
return tot[-1] - tot[0]
def get_polarization_change_norm(self):
"""
Get magnitude of difference between nonpolar and polar same branch
polarization.
"""
polar = self.structures[-1]
a, b, c = polar.lattice.matrix
a, b, c = a / np.linalg.norm(a), b / np.linalg.norm(
b), c / np.linalg.norm(c)
P = self.get_polarization_change().A1
P_norm = np.linalg.norm(a * P[0] + b * P[1] + c * P[2])
return P_norm
def same_branch_splines(self):
    """
    Fit splines to the same-branch polarization along a, b, and c.

    Used to assess jumps in the same-branch polarization.  Returns a
    3-tuple of UnivariateSpline objects; a component whose fit fails is
    returned as None.
    """
    from scipy.interpolate import UnivariateSpline
    tot = self.get_same_branch_polarization_data(
        convert_to_muC_per_cm2=True)
    L = tot.shape[0]
    splines = []
    for i in range(3):
        try:
            splines.append(UnivariateSpline(range(L), tot[:, i].A1))
        except Exception:
            # Fitting can fail (e.g. too few structures); signal with None
            # instead of aborting.  Narrowed from a bare `except:` which
            # also swallowed KeyboardInterrupt/SystemExit.
            splines.append(None)
    return tuple(splines)
def max_spline_jumps(self):
    """
    Get maximum difference between spline and same branch polarization
    data along each lattice direction.

    Entries stay None where the corresponding spline fit failed.
    """
    tot = self.get_same_branch_polarization_data(
        convert_to_muC_per_cm2=True)
    sps = self.same_branch_splines()
    max_jumps = [None, None, None]
    for i, sp in enumerate(sps):
        if sp is not None:  # identity check is the idiomatic None test (was `sp != None`)
            max_jumps[i] = max(tot[:, i].A1 - sp(range(len(tot[:, i].A1))))
    return max_jumps
def smoothness(self):
    """
    Get rms average difference between spline and same branch
    polarization data for each of the a, b, c directions.

    Returns a 3-element list of rms values, or None if the spline
    fits could not be computed.
    """
    tot = self.get_same_branch_polarization_data(
        convert_to_muC_per_cm2=True)
    L = tot.shape[0]
    try:
        sp = self.same_branch_splines()
    except Exception:
        # Narrowed from a bare `except:`; still best-effort, but no longer
        # traps KeyboardInterrupt/SystemExit.
        print("Something went wrong.")
        return None
    sp_latt = [sp[i](range(L)) for i in range(3)]
    diff = [sp_latt[i] - tot[:, i].A1 for i in range(3)]
    rms = [np.sqrt(np.sum(np.square(diff[i])) / L) for i in range(3)]
    return rms
class EnergyTrend(object):
    """Analyse an energy trend along an interpolation path of structures.

    `energies` is expected to support numpy arithmetic (several methods
    subtract numpy arrays from it), ordered nonpolar -> polar.
    """

    def __init__(self, energies):
        # Energies ordered along the interpolation path.
        self.energies = energies

    def spline(self):
        """
        Fit a quartic (k=4) smoothing spline to the energy trend data.
        """
        from scipy.interpolate import UnivariateSpline
        sp = UnivariateSpline(range(len(self.energies)), self.energies, k=4)
        return sp

    def smoothness(self):
        """
        Get rms average difference between spline and energy trend.

        Returns None if the spline fit fails.
        """
        energies = self.energies
        try:
            sp = self.spline()
        except Exception:  # narrowed from a bare `except:`
            print("Energy spline failed.")
            return None
        spline_energies = sp(range(len(energies)))
        diff = spline_energies - energies
        rms = np.sqrt(np.sum(np.square(diff)) / len(energies))
        return rms

    def max_spline_jump(self):
        """
        Get maximum difference between spline and energy trend.
        """
        sp = self.spline()
        return max(self.energies - sp(range(len(self.energies))))

    def endpoints_minima(self, slope_cutoff=5e-3):
        """
        Test if spline endpoints are at minima for a given slope cutoff.

        Returns {"polar": <bool>, "nonpolar": <bool>} (polar = last point,
        nonpolar = first point), or None if the spline fit fails.
        """
        energies = self.energies
        try:
            sp = self.spline()
        except Exception:  # narrowed from a bare `except:`
            print("Energy spline failed.")
            return None
        der = sp.derivative()
        der_energies = der(range(len(energies)))
        return {"polar": abs(der_energies[-1]) <= slope_cutoff,
                "nonpolar": abs(der_energies[0]) <= slope_cutoff}
| 35.292411 | 101 | 0.63342 |
633b1c0a90c76618088fdbec5e508240088d9487 | 533 | py | Python | python/demos/minimize_demo.py | qyxiao/pmt | 87513794fc43f8aa1f4f3d7588fa45ffc75d1a44 | [
"MIT"
] | null | null | null | python/demos/minimize_demo.py | qyxiao/pmt | 87513794fc43f8aa1f4f3d7588fa45ffc75d1a44 | [
"MIT"
] | null | null | null | python/demos/minimize_demo.py | qyxiao/pmt | 87513794fc43f8aa1f4f3d7588fa45ffc75d1a44 | [
"MIT"
] | null | null | null | import autograd
import autograd.numpy as np
from scipy.optimize import minimize
# Call counter used to observe how many times the optimizer evaluates the
# objective (autograd's gradient also calls the function).
count = 0

def fake(x):
    """Quartic test objective; prints and increments the global call counter."""
    global count
    print(count)  # parenthesized so the module also runs on Python 3
    count += 1
    return x ** 4 + 10 * x ** 3 + 4 * x ** 2 + 7 * x + 1
# Minimize the quartic test objective with BFGS, using autograd to supply
# the exact gradient.
obj_fun = fake
grad_fun = autograd.grad(obj_fun)
params = -1  # initial guess
num_iters = 20  # cap on BFGS iterations
result = minimize(obj_fun, params, method='BFGS', jac=grad_fun,
                  options = {'maxiter':num_iters, 'disp':True})
# each gradient computation calls the function (because of autograd)
assert(result.nfev + result.njev == count)
70050cf8ba99cdfa3c3080ebfdffcec794dbcdb3 | 2,941 | py | Python | test/cpp/api/optim_baseline.py | DavidKo3/mctorch | 53ffe61763059677978b4592c8b2153b0c15428f | [
"BSD-3-Clause"
] | 1 | 2019-07-21T02:13:22.000Z | 2019-07-21T02:13:22.000Z | test/cpp/api/optim_baseline.py | DavidKo3/mctorch | 53ffe61763059677978b4592c8b2153b0c15428f | [
"BSD-3-Clause"
] | null | null | null | test/cpp/api/optim_baseline.py | DavidKo3/mctorch | 53ffe61763059677978b4592c8b2153b0c15428f | [
"BSD-3-Clause"
] | null | null | null | """Script to generate baseline values from PyTorch optimization algorithms"""
import argparse
import math
import torch
import torch.optim
HEADER = """
#include <torch/tensor.h>
#include <vector>
namespace expected_parameters {
"""
FOOTER = "} // namespace expected_parameters"
PARAMETERS = "static std::vector<std::vector<torch::Tensor>> {} = {{"
OPTIMIZERS = {
"Adam": lambda p: torch.optim.Adam(p, 1.0, weight_decay=1e-6),
"Adagrad": lambda p: torch.optim.Adagrad(p, 1.0, weight_decay=1e-6, lr_decay=1e-3),
"RMSprop": lambda p: torch.optim.RMSprop(p, 0.1, momentum=0.9, weight_decay=1e-6),
"SGD": lambda p: torch.optim.SGD(p, 0.1, momentum=0.9, weight_decay=1e-6),
}
def weight_init(module):
    """Re-initialize Linear layers uniformly in +/- 1/sqrt(fan_in); other
    module types are left untouched."""
    if not isinstance(module, torch.nn.Linear):
        return
    bound = 1.0 / math.sqrt(module.weight.size(1))
    for param in module.parameters():
        param.data.uniform_(-bound, bound)
def run(optimizer_name, iterations, sample_every):
    """Train a small fixed MLP with the named optimizer and return a list of
    flattened-parameter snapshots, one every `sample_every` steps."""
    torch.manual_seed(0)  # deterministic weights for reproducible baselines
    net = torch.nn.Sequential(
        torch.nn.Linear(2, 3),
        torch.nn.Sigmoid(),
        torch.nn.Linear(3, 1),
        torch.nn.Sigmoid(),
    )
    net = net.to(torch.float64).apply(weight_init)
    optimizer = OPTIMIZERS[optimizer_name](net.parameters())
    batch = torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], dtype=torch.float64)
    snapshots = []
    for step in range(iterations):
        optimizer.zero_grad()
        loss = net.forward(batch).sum()
        loss.backward()
        optimizer.step()
        if step % sample_every == 0:
            flat_params = [p.clone().flatten().data.numpy() for p in net.parameters()]
            snapshots.append(flat_params)
    return snapshots
def emit(optimizer_parameter_map):
    """Print the recorded parameter snapshots as the body of a C++ header."""
    # Don't write generated with an @ in front, else this file is recognized as generated.
    print("// @{} from {}".format('generated', __file__))
    print(HEADER)
    for opt_name, snapshots in optimizer_parameter_map.items():
        print(PARAMETERS.format(opt_name))
        for snapshot in snapshots:
            print("  {")
            for values in snapshot:
                joined = ", ".join(str(v) for v in values)
                print("    torch::tensor({}),".format("{{{}}}".format(joined)))
            print("  },")
        print("};\n")
    print(FOOTER)
def main():
    """Parse CLI options, run every optimizer, and emit the C++ baselines."""
    parser = argparse.ArgumentParser(
        "Produce optimization output baseline from PyTorch"
    )
    parser.add_argument("-i", "--iterations", default=1001, type=int)
    parser.add_argument("-s", "--sample-every", default=100, type=int)
    opts = parser.parse_args()
    results = {
        name: run(name, opts.iterations, opts.sample_every)
        for name in ("Adam", "Adagrad", "RMSprop", "SGD")
    }
    emit(results)


if __name__ == "__main__":
    main()
| 28.009524 | 90 | 0.622917 |
9406ab116b5f89a5b5b6602e72788787d1f34f11 | 4,662 | py | Python | validictory/tests/test_fail_fast.py | netsyno/validictory | dd683aee108b79ad3e07b861719e71470a0ae4b2 | [
"MIT"
] | 1 | 2016-03-27T19:42:39.000Z | 2016-03-27T19:42:39.000Z | validictory/tests/test_fail_fast.py | netsyno/validictory | dd683aee108b79ad3e07b861719e71470a0ae4b2 | [
"MIT"
] | null | null | null | validictory/tests/test_fail_fast.py | netsyno/validictory | dd683aee108b79ad3e07b861719e71470a0ae4b2 | [
"MIT"
] | null | null | null | from unittest import TestCase
import validictory
class TestFailFast(TestCase):
    """With fail_fast=False, validictory should aggregate every schema
    violation into one MultipleValidationError instead of raising on the
    first problem (fail_fast=True)."""

    def test_multi_error(self):
        """Two invalid properties -> two collected errors."""
        schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "age": {"type": "integer"}
            },
        }
        data = {"name": 2, "age": "fourty-two"}
        # ensure it raises an error
        self.assertRaises(validictory.ValidationError, validictory.validate,
                          data, schema, fail_fast=True)
        # ensure it raises a MultiError
        self.assertRaises(validictory.MultipleValidationError, validictory.validate,
                          data, schema, fail_fast=False)
        # ensure that the MultiError has 2 errors
        try:
            validictory.validate(data, schema, fail_fast=False)
        except validictory.MultipleValidationError as mve:
            assert len(mve.errors) == 2

    def test_multi_error_in_list(self):
        """Array items are validated individually; each bad item counts."""
        schema = {
            "type": "object",
            "properties": {
                "words": {"type": "array", "items": {"type": "string"}},
            },
        }
        data = {"words": ["word", 32, 2.1, True]}
        # ensure it raises an error
        self.assertRaises(validictory.ValidationError, validictory.validate,
                          data, schema, fail_fast=True)
        # ensure it raises a MultiError
        self.assertRaises(validictory.MultipleValidationError, validictory.validate,
                          data, schema, fail_fast=False)
        # ensure that the MultiError has 3 errors since 3 of the items were bad
        try:
            validictory.validate(data, schema, fail_fast=False)
        except validictory.MultipleValidationError as mve:
            assert len(mve.errors) == 3

    def test_multi_error_with_format(self):
        """Format validators (date/time) also accumulate under fail_fast=False."""
        schema = {
            "type": "object",
            "properties": {
                "date": {"type": "string", "format": "date"},
                "time": {"type": "string", "format": "time"}
            },
        }
        data = {"date": "2011-02-99", "time": "30:00:00"}
        # ensure it raises an error
        self.assertRaises(validictory.ValidationError, validictory.validate,
                          data, schema, fail_fast=True)
        # ensure it raises a MultiError
        self.assertRaises(validictory.MultipleValidationError, validictory.validate,
                          data, schema, fail_fast=False)
        # ensure that the MultiError has 2 errors
        try:
            validictory.validate(data, schema, fail_fast=False)
        except validictory.MultipleValidationError as mve:
            assert len(mve.errors) == 2
class TestArrayWithEnum(TestCase):
    """Regression tests for enum handling on array properties.

    NOTE(review): both tests end with `assert 0`, so they fail
    unconditionally — they look like deliberate markers (or leftovers) for
    a known regression; kept as-is to preserve the suite's behavior.
    """

    def test_multi_error_regression_wrong_schema(self):
        """Malformed schema: "enum" placed directly on the array type."""
        schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "e1": {"type": "array", "enum": ["one", "two"]},
            }
        }
        data = {"name": 2, "e1": ["one", "n"]}
        # ensure it raises an error
        self.assertRaises(validictory.ValidationError, validictory.validate,
                          data, schema, fail_fast=True)
        # ensure it raises a MultiError
        self.assertRaises(validictory.MultipleValidationError, validictory.validate,
                          data, schema, fail_fast=False)
        # ensure that the MultiError has 2 errors
        try:
            validictory.validate(data, schema, fail_fast=False)
        except validictory.MultipleValidationError as mve:
            print(mve)  # parenthesized: `print mve` is a SyntaxError on Python 3
            assert len(mve.errors) == 2
            assert 0

    def test_multi_error_regression_works(self):
        """Correct schema: the enum belongs on the array's "items"."""
        schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "e2": {"type": "array", "items": {"type": "string", "enum": ["one", "two"]},
                },
            }
        }
        data = {"name": 2, "e2": ["one", "n"]}
        # ensure it raises an error
        self.assertRaises(validictory.ValidationError, validictory.validate,
                          data, schema, fail_fast=True)
        # ensure it raises a MultiError
        self.assertRaises(validictory.MultipleValidationError, validictory.validate,
                          data, schema, fail_fast=False)
        # ensure that the MultiError has 2 errors
        try:
            validictory.validate(data, schema, fail_fast=False)
        except validictory.MultipleValidationError as mve:
            print(mve)  # parenthesized: `print mve` is a SyntaxError on Python 3
            assert len(mve.errors) == 2
            assert 0
203a467dff553809972a08cba2542bf5e8f8baf2 | 1,567 | py | Python | beattosetto/urls.py | HelloYeew/beattosetto | f8847fda908b0211c1221ba64854df42274b9db1 | [
"MIT"
] | 5 | 2021-11-02T15:09:24.000Z | 2022-03-24T03:03:57.000Z | beattosetto/urls.py | beattosetto/beattosetto | a4bd93fc1ca7530b73f03194847ab8b0dca86806 | [
"MIT"
] | 205 | 2021-10-12T09:13:59.000Z | 2022-03-10T10:09:07.000Z | beattosetto/urls.py | pontakornth/beattosetto | b3201922a61a9164659efc26f11dc818aa5705f2 | [
"MIT"
] | 2 | 2021-10-12T13:39:01.000Z | 2021-10-13T08:47:59.000Z | """beattosetto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.shortcuts import render
from django.urls import path, include
from beattosetto.settings import DEBUG
# URL routes: Django admin, allauth account flows, then the project apps
# (beatmap collections, users, actions), each mounted at the site root so
# their own urlconfs define the visible paths.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('accounts/', include('allauth.urls')),
    path('', include('beatmap_collections.urls')),
    path('', include('users.urls')),
    path('', include('actions.urls'))
]
if DEBUG:
    def create_error_template_view(error_code):
        """Build a debug-only view that renders the template for *error_code*.

        Django wires the error templates up automatically in production, so
        these explicit routes exist purely to preview them in development.
        """
        def error_template_view(request):
            """Render the error template matching the captured code."""
            template_name = f"{error_code}.html"
            return render(request, template_name)
        return error_template_view

    urlpatterns.extend(
        path(f"{code}/", create_error_template_view(code))
        for code in (400, 403, 404, 500)
    )
| 36.44186 | 78 | 0.682833 |
e1d108b5f72e280186112423dd1fcbe62bbc3a46 | 135 | py | Python | Hotel_webapp/src/contact/admin.py | MDRCS/Fullstack-Django | 20cbae6e1b70d7051662b579e7967061e529d71f | [
"MIT"
] | null | null | null | Hotel_webapp/src/contact/admin.py | MDRCS/Fullstack-Django | 20cbae6e1b70d7051662b579e7967061e529d71f | [
"MIT"
] | 19 | 2020-07-14T07:04:43.000Z | 2022-03-12T00:41:14.000Z | Hotel_webapp/src/contact/admin.py | MDRCS/Fullstack-Django | 20cbae6e1b70d7051662b579e7967061e529d71f | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import ContactDetails
# Expose the ContactDetails model in the Django admin site.
admin.site.register(ContactDetails)
3e20c23e7b412c3a3a88993463ee09983b624352 | 2,974 | py | Python | train.py | Velythyl/pairwise-distance-loss-POC | 408592cd04eb7b39b9c4191663609c90f9fdcf28 | [
"MIT"
] | null | null | null | train.py | Velythyl/pairwise-distance-loss-POC | 408592cd04eb7b39b9c4191663609c90f9fdcf28 | [
"MIT"
] | null | null | null | train.py | Velythyl/pairwise-distance-loss-POC | 408592cd04eb7b39b9c4191663609c90f9fdcf28 | [
"MIT"
] | null | null | null | import functools
import os
import numpy as np
import torch
from pytorch_lightning import Trainer
from torch import nn
import torch.nn.functional as F
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
import pytorch_lightning as pl
import eval_model
from datasets import VectorTargetDataset
def pairwise_distance_loss(embeddings, targets, no_loss=False):
    """MSE between the pairwise-distance matrices of targets and embeddings.

    `no_loss` is accepted for interface compatibility but is currently unused.
    """
    dist_targets = torch.cdist(targets, targets)
    dist_embeds = torch.cdist(embeddings, embeddings)
    return F.mse_loss(dist_targets, dist_embeds)
class NormalizerModule(nn.Module):
    """Module wrapper around row-wise (dim=1) L2 normalization."""

    def forward(self, input):
        normalized = F.normalize(input, dim=1)
        return normalized
class Encoder(pl.LightningModule):
    """MLP embedding flattened 28x28 images into 3-D, trained with a
    pluggable loss on (embedding, target-vector) pairs."""

    def __init__(self, loss_function):
        super().__init__()
        layers = [
            nn.Linear(28 * 28, 128),
            nn.ReLU(),
            nn.Linear(128, 3),
            nn.ReLU(),
        ]
        self.encoder = nn.Sequential(*layers)
        self.loss_function = loss_function

    def forward(self, x):
        """Flatten the batch and return its 3-D embeddings."""
        flat = x.view(x.size(0), -1)
        return self.encoder(flat)

    def training_step(self, batch, batch_idx):
        """One optimization step; batch is (x, target_vector, extra) where
        the third element is unused here."""
        inputs, target_vectors, _extra = batch
        flat = inputs.view(inputs.size(0), -1)
        embeddings = self.encoder(flat)
        loss = self.loss_function(embeddings, target_vectors)
        self.log("train_loss", loss.item())
        return loss

    def configure_optimizers(self):
        """Adam over all parameters with a fixed 1e-3 learning rate."""
        return torch.optim.Adam(self.parameters(), lr=1e-3)

    # Validation hooks were disabled in the original source; re-enable by
    # defining validation_step and calling eval_model.main in
    # on_validation_end.
PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")  # kept for the commented-out loader variants below
AVAIL_GPUS = min(1, torch.cuda.device_count())  # use at most one GPU
GPU = 0  # device index passed to eval_model
BATCH_SIZE = 256 if AVAIL_GPUS else 64
# Init our model
mnist_model = Encoder(pairwise_distance_loss)
# Init DataLoader from MNIST Dataset
# Same VectorTargetDataset settings for train and eval (seed 0, 2-D Gaussian
# targets, scale 0.1) — presumably so both draw matching target vectors; see
# VectorTargetDataset for the exact semantics.
train_ds = VectorTargetDataset(
    MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()),
    dataset_seed=0,
    vector_width=2,
    gaussian_instead_of_uniform=True,
    scale=0.1
) # MNIST(PATH_DATASETS, train=True, download=True, transform=transforms.ToTensor())
train_loader = DataLoader(train_ds, batch_size=BATCH_SIZE)
eval_ds = VectorTargetDataset(
    MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()),
    dataset_seed=0,
    vector_width=2,
    gaussian_instead_of_uniform=True,
    scale=0.1
) # MNIST(PATH_DATASETS, train=True, download=True, transform=transforms.ToTensor())
# Initialize a trainer
trainer = Trainer(
    gpus=AVAIL_GPUS,
    max_epochs=10,
    progress_bar_refresh_rate=20,
)
# Train the model, then evaluate the trained encoder on the held-out split.
trainer.fit(mnist_model, train_loader)
eval_model.main(mnist_model.cuda(GPU), eval_ds, GPU)
| 28.056604 | 85 | 0.700067 |
6d0bd2350e8e456625e770e3ab10f316daaa03be | 1,152 | py | Python | ivis/data/generators/generators.py | wna26/ivis | ac5e3694e92a210597e34a86a2450e90c9a12e00 | [
"Apache-2.0"
] | 271 | 2018-09-06T17:13:21.000Z | 2022-03-21T14:00:37.000Z | ivis/data/generators/generators.py | wna26/ivis | ac5e3694e92a210597e34a86a2450e90c9a12e00 | [
"Apache-2.0"
] | 65 | 2018-09-25T10:04:37.000Z | 2022-03-24T21:31:05.000Z | ivis/data/generators/generators.py | wna26/ivis | ac5e3694e92a210597e34a86a2450e90c9a12e00 | [
"Apache-2.0"
] | 34 | 2019-07-17T10:22:15.000Z | 2022-03-10T03:20:05.000Z | """Generators for non-triplet data"""
import tensorflow as tf
import numpy as np
from scipy.sparse import issparse
class KerasSequence(tf.keras.utils.Sequence):
    """Wraps inputs into a Keras Sequence to allow Keras models to predict on
    arbitrary inputs which may be out of memory."""

    def __init__(self, X, batch_size=32):
        # X: indexable data source with a .shape[0]; may be sparse or expose
        # a get_batch(indices) method for batched retrieval.
        self.X = X
        self.batch_size = batch_size
        # Dummy labels reused for every batch (predictions ignore them).
        self.placeholder_labels = np.empty(batch_size, dtype=np.uint8)
        self.batched_data = hasattr(X, 'get_batch')

    def __len__(self):
        # Number of batches, counting the final partial batch.
        return int(np.ceil(self.X.shape[0] / float(self.batch_size)))

    def __getitem__(self, index):
        """Return batch `index` as (data_array, placeholder_labels)."""
        batch_indices = range(index * self.batch_size, min((index + 1) * self.batch_size, self.X.shape[0]))
        if self.batched_data:
            batch = self.X.get_batch(batch_indices)
        else:
            batch = [self.X[i] for i in batch_indices]
            if issparse(self.X):
                # Each sparse row materializes with shape (1, n); flatten each
                # row individually.  The previous np.squeeze over the whole
                # stack also dropped the batch axis when the final batch had
                # exactly one sample, corrupting its shape.
                batch = [ele.toarray().ravel() for ele in batch]
        placeholder_labels = self.placeholder_labels[:len(batch_indices)]
        return np.asarray(batch), placeholder_labels
| 34.909091 | 107 | 0.657986 |
e7be1e1307b9cdfd6db14920ce800e829f6e91d8 | 26,098 | py | Python | train.py | CJWBW/DeceiveD | 042c5c4a4e65b20866cba89e64cfb99961dd8343 | [
"BSD-Source-Code"
] | null | null | null | train.py | CJWBW/DeceiveD | 042c5c4a4e65b20866cba89e64cfb99961dd8343 | [
"BSD-Source-Code"
] | null | null | null | train.py | CJWBW/DeceiveD | 042c5c4a4e65b20866cba89e64cfb99961dd8343 | [
"BSD-Source-Code"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Train a GAN using the techniques described in the paper
"Deceive D: Adaptive Pseudo Augmentation for GAN Training with Limited Data".
The code is heavily borrowed from the paper
"Training Generative Adversarial Networks with Limited Data"."""
import os
import click
import re
import json
import tempfile
import torch
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import training_stats
from torch_utils import custom_ops
#----------------------------------------------------------------------------
class UserError(Exception):
    """Raised for invalid or inconsistent command-line option combinations."""
#----------------------------------------------------------------------------
def setup_training_loop_kwargs(
    # General options (not included in desc).
    gpus = None, # Number of GPUs: <int>, default = 1 gpu
    snap = None, # Snapshot interval: <int>, default = 50 ticks
    seed = None, # Random seed: <int>, default = 0
    # Dataset.
    data = None, # Training dataset (required): <path>
    cond = None, # Train conditional model based on dataset labels: <bool>, default = False
    subset = None, # Train with only N images: <int>, default = all
    mirror = None, # Augment dataset with x-flips: <bool>, default = False
    # Metrics (not included in desc).
    metrics = None, # List of metric names: [], ['fid50k_full'] (default), ...
    metricdata = None, # Metric dataset (optional): <path>
    # Base config.
    cfg = None, # Base config: 'auto' (default), 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar'
    gamma = None, # Override R1 gamma: <float>
    kimg = None, # Override training duration: <int>
    batch = None, # Override batch size: <int>
    # Discriminator augmentation.
    aug = None, # Augmentation mode: 'apa' (default), 'noaug', 'fixed'
    p = None, # Specify p for 'fixed' (required): <float>
    target = None, # Override APA target for 'apa': <float>, default = depends on aug
    augpipe = None, # Augmentation pipeline: 'blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc' (default), ..., 'bgcfnc'
    with_dataaug = None, # Enable standard data augmentations for the discriminator inputs: <bool>, default = False
    # Transfer learning.
    resume = None, # Load previous network: 'noresume' (default), 'ffhq256', 'ffhq512', 'ffhq1024', 'celebahq256', 'lsundog256', <file>, <url>
    freezed = None, # Freeze-D: <int>, default = 0 discriminator layers
    # Performance options (not included in desc).
    fp32 = None, # Disable mixed-precision training: <bool>, default = False
    nhwc = None, # Use NHWC memory format with FP16: <bool>, default = False
    allow_tf32 = None, # Allow PyTorch to use TF32 for matmul and convolutions: <bool>, default = False
    nobench = None, # Disable cuDNN benchmarking: <bool>, default = False
    workers = None, # Override number of DataLoader workers: <int>, default = 3
):
    """Validate CLI options and assemble the training-loop configuration.

    Returns a tuple (desc, args): `desc` is a short string summarising the
    configuration (used to name the result directory), and `args` is a
    dnnlib.EasyDict of keyword arguments for training_loop.training_loop().
    Raises UserError for invalid or inconsistent option combinations.
    """
    args = dnnlib.EasyDict()

    # ------------------------------------------
    # General options: gpus, snap, seed
    # ------------------------------------------

    if gpus is None:
        gpus = 1
    assert isinstance(gpus, int)
    if not (gpus >= 1 and gpus & (gpus - 1) == 0):
        raise UserError('--gpus must be a power of two')
    args.num_gpus = gpus

    if snap is None:
        snap = 50
    assert isinstance(snap, int)
    if snap < 1:
        raise UserError('--snap must be at least 1')
    args.image_snapshot_ticks = snap
    args.network_snapshot_ticks = snap

    if seed is None:
        seed = 0
    assert isinstance(seed, int)
    args.random_seed = seed

    # -----------------------------------
    # Dataset: data, cond, subset, mirror
    # -----------------------------------

    assert data is not None
    assert isinstance(data, str)
    args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False)
    args.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=3, prefetch_factor=2)
    try:
        # Instantiate once up front to validate the path and read metadata.
        training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset
        args.training_set_kwargs.resolution = training_set.resolution # be explicit about resolution
        args.training_set_kwargs.use_labels = training_set.has_labels # be explicit about labels
        args.training_set_kwargs.max_size = len(training_set) # be explicit about dataset size
        desc = training_set.name
        del training_set # conserve memory
    except IOError as err:
        raise UserError(f'--data: {err}')

    if cond is None:
        cond = False
    assert isinstance(cond, bool)
    if cond:
        if not args.training_set_kwargs.use_labels:
            raise UserError('--cond=True requires labels specified in dataset.json')
        desc += '-cond'
    else:
        args.training_set_kwargs.use_labels = False

    if subset is not None:
        assert isinstance(subset, int)
        if not 1 <= subset <= args.training_set_kwargs.max_size:
            raise UserError(f'--subset must be between 1 and {args.training_set_kwargs.max_size}')
        desc += f'-subset{subset}'
        if subset < args.training_set_kwargs.max_size:
            args.training_set_kwargs.max_size = subset
            args.training_set_kwargs.random_seed = args.random_seed

    if mirror is None:
        mirror = False
    assert isinstance(mirror, bool)
    if mirror:
        desc += '-mirror'
        args.training_set_kwargs.xflip = True

    # ----------------------------
    # Metrics: metrics, metricdata
    # ----------------------------

    if metrics is None:
        metrics = ['fid50k_full']
    assert isinstance(metrics, list)
    if not all(metric_main.is_valid_metric(metric) for metric in metrics):
        raise UserError('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
    args.metrics = metrics

    # Metrics evaluate against the training data unless --metricdata is given.
    args.metric_dataset_kwargs = dnnlib.EasyDict(args.training_set_kwargs)
    if metricdata is not None:
        assert isinstance(metricdata, str)
        args.metric_dataset_kwargs.path = metricdata
    try:
        metric_dataset = dnnlib.util.construct_class_by_name(**args.metric_dataset_kwargs) # subclass of training.dataset.Dataset
        args.metric_dataset_kwargs.resolution = metric_dataset.resolution # be explicit about resolution
        args.metric_dataset_kwargs.use_labels = metric_dataset.has_labels # be explicit about labels
        args.metric_dataset_kwargs.max_size = None # no limit to the dataset for metrics
        del metric_dataset # conserve memory
    except IOError as err:
        raise UserError(f'--metricdata: {err}')

    # ------------------------------------
    # Base config: cfg, gamma, kimg, batch
    # ------------------------------------

    if cfg is None:
        cfg = 'auto'
    assert isinstance(cfg, str)
    desc += f'-{cfg}'

    cfg_specs = {
        'auto': dict(ref_gpus=-1, kimg=25000, mb=-1, mbstd=-1, fmaps=-1, lrate=-1, gamma=-1, ema=-1, ramp=0.05, map=2), # Populated dynamically based on resolution and GPU count.
        'stylegan2': dict(ref_gpus=8, kimg=25000, mb=32, mbstd=4, fmaps=1, lrate=0.002, gamma=10, ema=10, ramp=None, map=8), # Uses mixed-precision, unlike the original StyleGAN2.
        'paper256': dict(ref_gpus=8, kimg=25000, mb=64, mbstd=8, fmaps=0.5, lrate=0.0025, gamma=1, ema=20, ramp=None, map=8),
        'paper512': dict(ref_gpus=8, kimg=25000, mb=64, mbstd=8, fmaps=1, lrate=0.0025, gamma=0.5, ema=20, ramp=None, map=8),
        'paper1024': dict(ref_gpus=8, kimg=25000, mb=32, mbstd=4, fmaps=1, lrate=0.002, gamma=2, ema=10, ramp=None, map=8),
        'cifar': dict(ref_gpus=2, kimg=100000, mb=64, mbstd=32, fmaps=1, lrate=0.0025, gamma=0.01, ema=500, ramp=0.05, map=2),
    }

    assert cfg in cfg_specs
    spec = dnnlib.EasyDict(cfg_specs[cfg])
    spec.ref_gpus = gpus
    if cfg == 'auto':
        desc += f'{gpus:d}'
        spec.ref_gpus = gpus
        res = args.training_set_kwargs.resolution
        spec.mb = max(min(gpus * min(4096 // res, 32), 64), gpus) # keep gpu memory consumption at bay
        spec.mbstd = min(spec.mb // gpus, 4) # other hyperparams behave more predictably if mbstd group size remains fixed
        spec.fmaps = 1 if res >= 512 else 0.5
        spec.lrate = 0.002 if res >= 1024 else 0.0025
        spec.gamma = 0.0002 * (res ** 2) / spec.mb # heuristic formula
        spec.ema = spec.mb * 10 / 32

    args.G_kwargs = dnnlib.EasyDict(class_name='training.networks.Generator', z_dim=512, w_dim=512, mapping_kwargs=dnnlib.EasyDict(), synthesis_kwargs=dnnlib.EasyDict())
    args.D_kwargs = dnnlib.EasyDict(class_name='training.networks.Discriminator', block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())
    args.G_kwargs.synthesis_kwargs.channel_base = args.D_kwargs.channel_base = int(spec.fmaps * 32768)
    args.G_kwargs.synthesis_kwargs.channel_max = args.D_kwargs.channel_max = 512
    args.G_kwargs.mapping_kwargs.num_layers = spec.map
    args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 4 # enable mixed-precision training
    args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = 256 # clamp activations to avoid float16 overflow
    args.D_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd

    args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
    args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
    args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss', r1_gamma=spec.gamma)

    args.total_kimg = spec.kimg
    args.batch_size = spec.mb
    args.batch_gpu = spec.mb // spec.ref_gpus
    args.ema_kimg = spec.ema
    args.ema_rampup = spec.ramp

    if cfg == 'cifar':
        args.loss_kwargs.pl_weight = 0 # disable path length regularization
        args.loss_kwargs.style_mixing_prob = 0 # disable style mixing
        args.D_kwargs.architecture = 'orig' # disable residual skip connections

    if gamma is not None:
        assert isinstance(gamma, float)
        if not gamma >= 0:
            raise UserError('--gamma must be non-negative')
        desc += f'-gamma{gamma:g}'
        args.loss_kwargs.r1_gamma = gamma

    if kimg is not None:
        assert isinstance(kimg, int)
        if not kimg >= 1:
            raise UserError('--kimg must be at least 1')
        desc += f'-kimg{kimg:d}'
        args.total_kimg = kimg

    if batch is not None:
        assert isinstance(batch, int)
        if not (batch >= 1 and batch % gpus == 0):
            raise UserError('--batch must be at least 1 and divisible by --gpus')
        desc += f'-batch{batch}'
        args.batch_size = batch
        args.batch_gpu = batch // gpus

    # ---------------------------------------------------
    # Discriminator augmentation: aug, p, target, augpipe
    # ---------------------------------------------------

    if aug is None:
        aug = 'apa'
    else:
        assert isinstance(aug, str)
        desc += f'-{aug}'

    if aug == 'apa':
        args.apa_target = 0.6
    elif aug == 'noaug':
        pass
    elif aug == 'fixed':
        if p is None:
            raise UserError(f'--aug={aug} requires specifying --p')
    else:
        raise UserError(f'--aug={aug} not supported')

    if with_dataaug is None:
        with_dataaug = False
    assert isinstance(with_dataaug, bool)
    if with_dataaug:
        if aug == 'noaug':
            raise UserError(f'--with-dataaug=true cannot be specified with --aug=noaug')
        desc += f'-wdataaug'
        args.with_dataaug = with_dataaug

    if p is not None:
        assert isinstance(p, float)
        if aug != 'fixed':
            raise UserError('--p can only be specified with --aug=fixed')
        if not 0 <= p <= 1:
            raise UserError('--p must be between 0 and 1')
        desc += f'-p{p:g}'
        args.augment_p = p

    if target is not None:
        assert isinstance(target, float)
        if aug != 'apa':
            raise UserError('--target can only be specified with --aug=apa')
        if not 0 <= target <= 1:
            raise UserError('--target must be between 0 and 1')
        desc += f'-target{target:g}'
        args.apa_target = target

    assert augpipe is None or isinstance(augpipe, str)
    if augpipe is None:
        augpipe = 'bgc'
    else:
        if aug == 'noaug':
            raise UserError('--augpipe cannot be specified with --aug=noaug')
        desc += f'-{augpipe}'

    augpipe_specs = {
        'blit': dict(xflip=1, rotate90=1, xint=1),
        'geom': dict(scale=1, rotate=1, aniso=1, xfrac=1),
        'color': dict(brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
        'filter': dict(imgfilter=1),
        'noise': dict(noise=1),
        'cutout': dict(cutout=1),
        'bg': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1),
        'bgc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
        'bgcf': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1),
        'bgcfn': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1),
        'bgcfnc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1, cutout=1),
    }

    assert augpipe in augpipe_specs
    if aug != 'noaug':
        args.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', **augpipe_specs[augpipe])

    # ----------------------------------
    # Transfer learning: resume, freezed
    # ----------------------------------

    resume_specs = {
        'ffhq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res256-mirror-paper256-noaug.pkl',
        'ffhq512': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res512-mirror-stylegan2-noaug.pkl',
        'ffhq1024': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res1024-mirror-stylegan2-noaug.pkl',
        'celebahq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/celebahq-res256-mirror-paper256-kimg100000-ada-target0.5.pkl',
        'lsundog256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/lsundog-res256-paper256-kimg100000-noaug.pkl',
    }

    assert resume is None or isinstance(resume, str)
    if resume is None:
        resume = 'noresume'
    elif resume == 'noresume':
        desc += '-noresume'
    elif resume in resume_specs:
        desc += f'-resume{resume}'
        args.resume_pkl = resume_specs[resume] # predefined url
    else:
        desc += '-resumecustom'
        args.resume_pkl = resume # custom path or url

    if resume != 'noresume':
        args.apa_kimg = 100 # make APA react faster at the beginning
        args.ema_rampup = None # disable EMA rampup

    if freezed is not None:
        assert isinstance(freezed, int)
        if not freezed >= 0:
            raise UserError('--freezed must be non-negative')
        desc += f'-freezed{freezed:d}'
        args.D_kwargs.block_kwargs.freeze_layers = freezed

    # -------------------------------------------------
    # Performance options: fp32, nhwc, nobench, workers
    # -------------------------------------------------

    if fp32 is None:
        fp32 = False
    assert isinstance(fp32, bool)
    if fp32:
        args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 0
        args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = None

    if nhwc is None:
        nhwc = False
    assert isinstance(nhwc, bool)
    if nhwc:
        args.G_kwargs.synthesis_kwargs.fp16_channels_last = args.D_kwargs.block_kwargs.fp16_channels_last = True

    if nobench is None:
        nobench = False
    assert isinstance(nobench, bool)
    if nobench:
        args.cudnn_benchmark = False

    if allow_tf32 is None:
        allow_tf32 = False
    assert isinstance(allow_tf32, bool)
    if allow_tf32:
        args.allow_tf32 = True

    if workers is not None:
        assert isinstance(workers, int)
        if not workers >= 1:
            raise UserError('--workers must be at least 1')
        args.data_loader_kwargs.num_workers = workers

    return desc, args
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
    """Per-process entry point: set up logging and distributed state, then run training."""
    dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)

    multi_gpu = args.num_gpus > 1

    # Initialize torch.distributed through a file-based rendezvous in temp_dir.
    if multi_gpu:
        rendezvous_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        if os.name == 'nt':
            # Windows: gloo backend, and the file URL needs forward slashes.
            init_url = 'file:///' + rendezvous_file.replace('\\', '/')
            backend = 'gloo'
        else:
            init_url = f'file://{rendezvous_file}'
            backend = 'nccl'
        torch.distributed.init_process_group(backend=backend, init_method=init_url, rank=rank, world_size=args.num_gpus)

    # Initialize torch_utils statistics collection (sync across GPUs when multi-GPU).
    sync_device = torch.device('cuda', rank) if multi_gpu else None
    training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
    if rank != 0:
        custom_ops.verbosity = 'none'

    # Run the actual training loop.
    training_loop.training_loop(rank=rank, **args)
#----------------------------------------------------------------------------
class CommaSeparatedList(click.ParamType):
    """Click parameter type that splits a comma-separated string into a list.

    ``None``, the empty string, and the literal string ``'none'`` (any case)
    all convert to an empty list.
    """
    name = 'list'

    def convert(self, value, param, ctx):
        _ = param, ctx  # unused, but required by the click ParamType interface
        if value is None or value == '' or value.lower() == 'none':
            return []
        return value.split(',')
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
# General options.
@click.option('--outdir', help='Where to save the results', required=True, metavar='DIR')
@click.option('--gpus', help='Number of GPUs to use [default: 1]', type=int, metavar='INT')
@click.option('--snap', help='Snapshot interval [default: 50 ticks]', type=int, metavar='INT')
@click.option('--seed', help='Random seed [default: 0]', type=int, metavar='INT')
@click.option('-n', '--dry-run', help='Print training options and exit', is_flag=True)
# Dataset.
@click.option('--data', help='Training data (directory or zip)', metavar='PATH', required=True)
@click.option('--cond', help='Train conditional model based on dataset labels [default: false]', type=bool, metavar='BOOL')
@click.option('--subset', help='Train with only N images [default: all]', type=int, metavar='INT')
@click.option('--mirror', help='Enable dataset x-flips [default: false]', type=bool, metavar='BOOL')
# Metrics.
@click.option('--metrics', help='Comma-separated list or "none" [default: fid50k_full]', type=CommaSeparatedList())
@click.option('--metricdata', help='Dataset to evaluate metrics against (directory or zip) [default: same as training data]', metavar='PATH')
# Base config.
@click.option('--cfg', help='Base config [default: auto]', type=click.Choice(['auto', 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar']))
@click.option('--gamma', help='Override R1 gamma', type=float)
@click.option('--kimg', help='Override training duration', type=int, metavar='INT')
@click.option('--batch', help='Override batch size', type=int, metavar='INT')
# Discriminator augmentation.
@click.option('--aug', help='Augmentation mode [default: apa]', type=click.Choice(['noaug', 'apa', 'fixed']))
@click.option('--p', help='Augmentation probability for --aug=fixed', type=float)
@click.option('--target', help='APA target value for --aug=apa', type=float)
@click.option('--augpipe', help='Augmentation pipeline [default: bgc]', type=click.Choice(['blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc', 'bgcf', 'bgcfn', 'bgcfnc']))
@click.option('--with-dataaug', help='Enable standard data augmentations for the discriminator inputs [default: false]', type=bool, metavar='BOOL')
# Transfer learning.
@click.option('--resume', help='Resume training [default: noresume]', metavar='PKL')
@click.option('--freezed', help='Freeze-D [default: 0 layers]', type=int, metavar='INT')
# Performance options.
@click.option('--fp32', help='Disable mixed-precision training', type=bool, metavar='BOOL')
@click.option('--nhwc', help='Use NHWC memory format with FP16', type=bool, metavar='BOOL')
@click.option('--nobench', help='Disable cuDNN benchmarking', type=bool, metavar='BOOL')
@click.option('--allow-tf32', help='Allow PyTorch to use TF32 internally', type=bool, metavar='BOOL')
@click.option('--workers', help='Override number of DataLoader workers', type=int, metavar='INT')
def main(ctx, outdir, dry_run, **config_kwargs):
    """Train a GAN using the techniques described in the paper
    "Deceive D: Adaptive Pseudo Augmentation for GAN Training with Limited Data".
    The code is heavily borrowed from the paper
    "Training Generative Adversarial Networks with Limited Data".

    Examples:

    \b
    # Train with custom dataset using 1 GPU.
    python train.py --outdir=~/training-runs --data=~/mydataset.zip --gpus=1

    \b
    # Train class-conditional CIFAR-10 using 2 GPUs.
    python train.py --outdir=~/training-runs --data=~/datasets/cifar10.zip \\
        --gpus=2 --cfg=cifar --cond=1

    \b
    # Transfer learn MetFaces from FFHQ using 4 GPUs.
    python train.py --outdir=~/training-runs --data=~/datasets/metfaces.zip \\
        --gpus=4 --cfg=paper1024 --mirror=1 --resume=ffhq1024 --snap=10

    \b
    # Reproduce original StyleGAN2 config F.
    python train.py --outdir=~/training-runs --data=~/datasets/ffhq.zip \\
        --gpus=8 --cfg=stylegan2 --mirror=1 --aug=noaug

    \b
    Base configs (--cfg):
      auto       Automatically select reasonable defaults based on resolution
                 and GPU count. Good starting point for new datasets.
      stylegan2  Reproduce results for StyleGAN2 config F at 1024x1024.
      paper256   Reproduce results for FFHQ and LSUN Cat at 256x256.
      paper512   Reproduce results for BreCaHAD and AFHQ at 512x512.
      paper1024  Reproduce results for MetFaces at 1024x1024.
      cifar      Reproduce results for CIFAR-10 at 32x32.

    \b
    Transfer learning source networks (--resume):
      ffhq256        FFHQ trained at 256x256 resolution.
      ffhq512        FFHQ trained at 512x512 resolution.
      ffhq1024       FFHQ trained at 1024x1024 resolution.
      celebahq256    CelebA-HQ trained at 256x256 resolution.
      lsundog256     LSUN Dog trained at 256x256 resolution.
      <PATH or URL>  Custom network pickle.
    """
    dnnlib.util.Logger(should_flush=True)

    # Setup training options.
    try:
        run_desc, args = setup_training_loop_kwargs(**config_kwargs)
    except UserError as err:
        # Surface configuration problems as clean CLI usage errors.
        ctx.fail(err)

    # Pick output directory: next sequential numeric id among existing run dirs.
    prev_run_dirs = []
    if os.path.isdir(outdir):
        prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]
    prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs]
    prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]
    cur_run_id = max(prev_run_ids, default=-1) + 1
    args.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{run_desc}')
    assert not os.path.exists(args.run_dir)

    # Print options.
    print()
    print('Training options:')
    print(json.dumps(args, indent=2))
    print()
    print(f'Output directory: {args.run_dir}')
    print(f'Training data: {args.training_set_kwargs.path}')
    print(f'Training duration: {args.total_kimg} kimg')
    print(f'Number of GPUs: {args.num_gpus}')
    print(f'Number of images: {args.training_set_kwargs.max_size}')
    print(f'Image resolution: {args.training_set_kwargs.resolution}')
    print(f'Conditional model: {args.training_set_kwargs.use_labels}')
    print(f'Dataset x-flips: {args.training_set_kwargs.xflip}')
    print()

    # Dry run?
    if dry_run:
        print('Dry run; exiting.')
        return

    # Create output directory and persist the resolved options for reproducibility.
    print('Creating output directory...')
    os.makedirs(args.run_dir)
    with open(os.path.join(args.run_dir, 'training_options.json'), 'wt') as f:
        json.dump(args, f, indent=2)

    # Launch processes: run in-process for one GPU, otherwise spawn one process per GPU.
    print('Launching processes...')
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        if args.num_gpus == 1:
            subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
        else:
            torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
#----------------------------------------------------------------------------
if __name__ == "__main__":
    # click parses sys.argv and injects all option values, hence the pylint override.
    main() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 44.996552 | 192 | 0.632386 |
1dc02c28205a90ec1865b7fb3bb5e5f71b7556ef | 25 | py | Python | statmorph/__init__.py | astro-nova/statmorph | 7d4965dcd6559c8eda912f7c528e3cabc35b8cd8 | [
"BSD-3-Clause"
] | 31 | 2017-08-28T22:06:55.000Z | 2022-03-30T06:56:45.000Z | statmorph/__init__.py | astro-nova/statmorph | 7d4965dcd6559c8eda912f7c528e3cabc35b8cd8 | [
"BSD-3-Clause"
] | 9 | 2017-10-27T15:41:39.000Z | 2020-09-04T04:40:55.000Z | statmorph/__init__.py | astro-nova/statmorph | 7d4965dcd6559c8eda912f7c528e3cabc35b8cd8 | [
"BSD-3-Clause"
] | 13 | 2018-02-12T20:23:08.000Z | 2021-12-10T05:12:28.000Z | from .statmorph import *
| 12.5 | 24 | 0.76 |
84cbc165ceb8bfe1f617dbcf79095ee584aa5fbc | 958 | py | Python | build.py | krzem5/Java-Launguage | d12a4c31a25f40b430885d1ebb9c2945c0d0462d | [
"BSD-3-Clause"
] | null | null | null | build.py | krzem5/Java-Launguage | d12a4c31a25f40b430885d1ebb9c2945c0d0462d | [
"BSD-3-Clause"
] | null | null | null | build.py | krzem5/Java-Launguage | d12a4c31a25f40b430885d1ebb9c2945c0d0462d | [
"BSD-3-Clause"
] | null | null | null | import os
import subprocess
import sys
import zipfile

# Reset the build directory: empty it if it already exists, create it otherwise.
if os.path.exists("build"):
    pending_dirs = []
    for root, sub_dirs, file_names in os.walk("build"):
        # Prepend newly seen directories so deeper ones end up first and
        # are removed before their parents.
        pending_dirs = [os.path.join(root, d) for d in sub_dirs] + pending_dirs
        for file_name in file_names:
            os.remove(os.path.join(root, file_name))
    for directory in pending_dirs:
        os.rmdir(directory)
else:
    os.mkdir("build")

# Compile every Java source under src/ into build/.
original_cwd = os.getcwd()
os.chdir("src")
java_sources = []
for root, _, file_names in os.walk("."):
    java_sources.extend(os.path.join(root, name) for name in file_names if name.endswith(".java"))
if subprocess.run(["javac", "-d", "../build"] + java_sources).returncode != 0:
    sys.exit(1)
os.chdir(original_cwd)

# Package the manifest and all compiled classes into a runnable jar.
with zipfile.ZipFile("build/language.jar", "w") as jar:
    print("Writing: META-INF/MANIFEST.MF")
    jar.write("manifest.mf", arcname="META-INF/MANIFEST.MF")
    for root, _, file_names in os.walk("build"):
        for file_name in file_names:
            if file_name.endswith(".class"):
                full_path = os.path.join(root, file_name)
                # Strip the leading "build/" so archive paths are package-relative.
                print(f"Writing: {full_path[6:].replace(chr(92), '/')}")
                jar.write(full_path, full_path[6:])

if "--run" in sys.argv:
    subprocess.run(["java", "-jar", "build/language.jar", "-c", "prog.cjl", "-d", "-r", "prog"])
| 25.210526 | 86 | 0.611691 |
626e7d3fec1c2a2018cb9735cc6072f414f0d52e | 931 | py | Python | lib/spack/docs/tutorial/examples/4.package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-05-19T12:24:44.000Z | 2019-05-24T10:58:09.000Z | lib/spack/docs/tutorial/examples/4.package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17 | 2018-09-20T18:32:50.000Z | 2019-12-04T16:58:12.000Z | lib/spack/docs/tutorial/examples/4.package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-09-21T07:45:10.000Z | 2019-09-21T07:45:10.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpileaks(Package):
    """Tool to detect and report MPI objects like MPI_Requests and
    MPI_Datatypes."""

    homepage = "https://github.com/LLNL/mpileaks"
    url = "https://github.com/LLNL/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"

    version('1.0', sha256='2e34cc4505556d1c1f085758e26f2f8eea0972db9382f051b2dcfb1d7d9e1825')

    depends_on('mpi')
    depends_on('adept-utils')
    depends_on('callpath')

    def install(self, spec, prefix):
        """Configure against the MPI-aware dependencies, then build and install."""
        configure_args = [
            '--prefix={0}'.format(prefix),
            '--with-adept-utils={0}'.format(spec['adept-utils'].prefix),
            '--with-callpath={0}'.format(spec['callpath'].prefix),
        ]
        configure(*configure_args)
        make()
        make('install')
2a1b266e4e4ff9f53d009c4629e26b0319b54edf | 1,338 | py | Python | geno_sugar/preprocess.py | quattro/geno-sugar | ffe85162e440685b1b7fbe2c7acb0dcc0d66dd76 | [
"Apache-2.0"
] | null | null | null | geno_sugar/preprocess.py | quattro/geno-sugar | ffe85162e440685b1b7fbe2c7acb0dcc0d66dd76 | [
"Apache-2.0"
] | 3 | 2018-10-11T09:45:17.000Z | 2020-02-12T15:14:36.000Z | geno_sugar/preprocess.py | quattro/geno-sugar | ffe85162e440685b1b7fbe2c7acb0dcc0d66dd76 | [
"Apache-2.0"
] | 3 | 2018-10-01T09:31:02.000Z | 2019-03-27T15:52:08.000Z | from .utils import standardize_snps
import scipy as sp
def filter_by_missing(max_miss=0.01):
    """
    Return a preprocessing function that drops variants with too many missing calls.

    :param max_miss: maximum allowed fraction of missing (NaN) genotypes per
        variant; a variant is kept only if its missing fraction is strictly
        below this threshold (default 0.01).
    :return: function ``f(G, bim) -> (G_out, bim_out)`` where ``G`` is the
        (samples x variants) genotype matrix and ``bim`` the variant table.
    """

    def f(G, bim):
        # scipy's top-level numpy aliases (sp.isnan) are deprecated/removed;
        # use numpy directly for the per-column missing fraction.
        Isnp = np.isnan(G).mean(0) < max_miss
        G_out = G[:, Isnp]
        bim_out = bim[Isnp]
        return G_out, bim_out

    return f
def filter_by_maf(min_maf=0.01):
    """
    Return a preprocessing function that filters variants by minor allele frequency.

    :param min_maf: minimum folded allele frequency a variant must exceed to
        be kept (default 0.01).
    :return: function ``f(G, bim) -> (G_out, bim_out)``.
    """

    def f(G, bim):
        # Allele frequency per variant, folded onto the minor allele (<= 0.5).
        freq = G.mean(0) / 2.0
        flip = freq > 0.5
        freq[flip] = 1.0 - freq[flip]
        keep = freq > min_maf
        return G[:, keep], bim[keep]

    return f
def standardize():
    """
    Return a preprocessing step that standardizes each variant in G.

    The bim table is passed through unchanged.
    """

    def step(G, bim):
        return standardize_snps(G), bim

    return step
def impute(imputer):
    """
    Return a preprocessing step that imputes missing genotypes.

    :param imputer: object exposing a scikit-learn style ``fit_transform``
        method; applied to G, while the bim table is passed through unchanged.
    """

    def step(G, bim):
        G_imputed = imputer.fit_transform(G)
        return G_imputed, bim

    return step
def compose(func_list):
    """
    Chain preprocessing steps into a single ``(G, bim) -> (G, bim)`` callable.

    Each element of ``func_list`` is applied in order, feeding its output
    into the next step.
    """

    def chained(G, bim):
        state = (G, bim)
        for step in func_list:
            state = step(*state)
        return state

    return chained
| 18.84507 | 64 | 0.532885 |
b43dbce47042e686b05f93efcf48902e58f18d89 | 993 | py | Python | tests/test_family_item.py | bodik/nessus-report-parser | 4b8409d72842bd56ee78273889ec400eb767acd7 | [
"MIT"
] | 8 | 2018-08-09T14:23:44.000Z | 2021-11-15T22:43:38.000Z | tests/test_family_item.py | bodik/nessus-report-parser | 4b8409d72842bd56ee78273889ec400eb767acd7 | [
"MIT"
] | 1 | 2019-05-21T21:51:26.000Z | 2019-05-21T21:51:26.000Z | tests/test_family_item.py | bodik/nessus-report-parser | 4b8409d72842bd56ee78273889ec400eb767acd7 | [
"MIT"
] | 4 | 2019-02-27T12:19:51.000Z | 2021-11-15T22:43:39.000Z | import unittest
from lxml import etree
from nessus_report_parser import FamilyItem
class TestFamilyItem(unittest.TestCase):
    """Unit tests for FamilyItem parsing, representation and validation."""

    def test_well_formed_node(self):
        """A well-formed <FamilyItem> XML node parses into the expected dict."""
        node = (
            "<FamilyItem>"
            "<FamilyName>Scientific Linux Local Security Checks</FamilyName>"
            "<Status>enabled</Status>"
            "</FamilyItem>"
        )
        expected_dict = {
            'family_name': 'Scientific Linux Local Security Checks',
            'status': 'enabled',
        }
        self.assertEqual(expected_dict, FamilyItem.from_etree(etree.XML(node)))

    def test_family_item_repr(self):
        """Constructing from a valid dict stores the data unchanged."""
        expected = {'family_name': 'Família Dinossauro', 'status': 'Cancelado!'}
        item = FamilyItem({'family_name': 'Família Dinossauro',
                           'status': 'Cancelado!'})
        self.assertEqual(expected, item.data)

    def test_create_invalid_family_item(self):
        """A dict with unexpected keys is rejected with an AssertionError."""
        self.assertRaises(AssertionError, FamilyItem, {'family': 'error'})
2b78bef0befdef3d618415b91152c856d754367b | 46,064 | py | Python | xData/interactivePlot/multiplot.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 14 | 2019-08-29T23:46:24.000Z | 2022-03-21T10:16:25.000Z | xData/interactivePlot/multiplot.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 1 | 2020-08-04T16:14:45.000Z | 2021-12-01T01:54:34.000Z | xData/interactivePlot/multiplot.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 2 | 2022-03-03T22:41:41.000Z | 2022-03-03T22:54:43.000Z | # <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
Interactive plotting with PyQt5
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The main entry is the class MultiPlotWithPyQt5, which takes a dictionary of plot attributes (the plot title and axis
labels) and a dictionary containing the plot data. The plot data dictionary is expected to have the plot labels as keys
and each value should be a tuple of two lists. The list at index 0 is expected to be the plot's independent axis data,
while the list at index 1 is expected to contain the dependent axis data.
Sample Use:
    MultiPlotWithPyQt5(plotAttributes, plotData)
"""
import matplotlib # Weird bug on LC with Python 3.8.2 if matplotlib is imported after PyQt5
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLineEdit, QLabel, QCheckBox, QVBoxLayout, QTabWidget, \
QDialog, QScrollArea, QComboBox, QGridLayout, QGroupBox
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QColor, QPalette, QFont, QIntValidator, QDoubleValidator
from collections import OrderedDict
import re
import numpy
import warnings
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib import pyplot
from matplotlib.transforms import Bbox
matplotlib.use('Qt5Agg')
# ignore matplotlib warning for loc="best" since it is only used for cases with less than 10 plots
warnings.filterwarnings("ignore", message=".*Creating legend with loc.*")
class MultiPlotWithPyQt5:
    """
    Top-level entry point for interactive plotting with PyQt5.

    Creates the main application window, runs the Qt event loop until the
    window is closed, and then schedules the window object for deletion.

    :param plotAttributes: Dictionary with the plot title and axis labels as entries.
    :param plotData: Dictionary with plot labels as keys and each value as a tuple of x-value list and y-value list.
    :param plotType: Variable indicating the plot type (i.e. '2d' or '3d') (string)
    """

    def __init__(self, plotAttributes, plotData, plotType='2d'):
        window = MainWindow(plotAttributes, plotData, plotType)
        window.applicationObject.exec()
        # Explicitly schedule deletion of the QMainWindow; without this a
        # segmentation fault (signal 11) can occur at interpreter shutdown.
        window.deleteLater()
class MainWindow(QMainWindow):
    """
    The main PyQt5 application window class which is derived from PyQt5.QtWidgets.QMainWindow.
    this class initializes the PyQt5.QtWidgets.QApplication which manages the GUI application's control flow and main
    settings. It initially sets the application style to "fusion", i.e. QApplication::setStyle("fusion") which is
    required to address OSX idiosyncrasies with the use of colors with PyQt5.QtWidgets.QComboBox.
    After setting the geometry it launches a main window (which embeds the matplotlib plot) and an auxiliary window
    which contains controls for plot attributes.
    :param plotAttributes: Dictionary with the plot title and axis labels as entries.
    :param plotData: Dictionary with plot labels as keys and each value as a tuple of x-value list and y-value list.
    :param plotType: Variable indicating the plot type (i.e. '2d' or '3d') (string)
    """
    def __init__(self, plotAttributes, plotData, plotType):
        # The QApplication must exist before any QWidget (including this window) is constructed.
        self.applicationObject = QApplication([])
        self.applicationObject.setStyle("fusion")  # required to have QCombobox colors on osx
        super().__init__()

        # initialize plot window
        self.setWindowTitle(plotAttributes['title'])
        self.plotType = plotType
        # 3d plots get a larger window than the default pyplot figure size.
        magnification = 1 if plotType == '2d' else 1.5

        # window dimensions from default pyplot parameters
        dpi = 100
        widthInches, heightInches = pyplot.rcParams["figure.figsize"]
        width = widthInches*dpi*magnification
        height = heightInches*dpi*magnification
        self.setGeometry(0, 0, width, height)

        # generate plot and embed it with a navigation toolbar
        self.plotInstance = plotCanvas(self, widthInches, heightInches, plotAttributes, plotData, dpi, plotType)
        self.setCentralWidget(self.plotInstance)
        self.addToolBar(NavigationToolbar(self.plotInstance, self))
        self.show()

        # The controls dialog is placed immediately to the right of the plot window.
        x0, y0 = width, 0
        width = 800
        height = 300
        self.dialogWindow = DialogWindow(width, height, plotAttributes, plotObject=self.plotInstance, plotType=plotType,
                                         parent=self)
        self.dialogWindow.setGeometry(x0, y0, width, height)
        self.dialogWindow.show()

    def closeEvent(self, event):
        # ensure dialog window is closed
        if self.plotType == '2d':
            self.dialogWindow.accept()
class plotCanvas(FigureCanvas):
    """
    The canvas the plot figure renders to.
    Class derived from matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg to embed the the plot. It loops through the
    entries in the plot data dictionary and uses the keys as plot labels. The values of the plot data dictionary are
    expected to be a tuple with the list at index 0 representing the independent ("x") data and the list at index 1, the
    dependent data ("y").
    For cases with less than 10 plots the legend is placed at the matplotlib "best" location and is allowed to be
    draggable. For cases with more than 10 plots the legend is fixed to the left of the plot window and is allowed to be
    scrollable.
    :param parent: The parent class which in this case is the PyQt5 application window class.
    :param width: Window width in inches, derived from pyplot.rcParams["figure.figsize"] in the parent class (float).
    :param height: Window height in inches, derived from pyplot.rcParams["figure.figsize"] in the parent class (float).
    :param plotAttributes: Dictionary with the plot title and axis labels as entries.
    :param plotData: Dictionary with plot labels as keys and each value as a tuple of x-value list and y-value list.
    :param dpi: Figure resolution (float).
    :param plotType: Variable indicating the plot type (i.e. '2d' or '3d') (string)
    """
    def __init__(self, parent, width, height, plotAttributes, plotData, dpi, plotType):
        self.plotFigure = Figure(figsize=(width, height), dpi=dpi)
        FigureCanvas.__init__(self, self.plotFigure)
        self.setParent(parent)

        # not sure what this does ... keep it around anyway
        # FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
        # FigureCanvas.updateGeometry(self)

        availablePlots = ['2d', '3d']
        assert plotType in availablePlots, \
            'Unrecognized plot type ... expected one of the following: %s' % ', '.join(availablePlots)

        self.plotAxis = None
        self.gridOn = None
        self.dataLimits = None
        if plotType == '2d':
            self.plot2d(plotData, plotAttributes)
            assert self.plotAxis is not None
            # With 10 or more curves the legend moves outside the axes and becomes scrollable.
            self.legendDraggableNotScrollable = len(self.plotAxis.lines) < 10
            self.percentPlotWidth = 1.0 if self.legendDraggableNotScrollable else 0.8
            self.legend = self.addLegend(self.legendDraggableNotScrollable)
        elif plotType == '3d':
            self.plot3d(plotData, plotAttributes)

        self.draw()

    def plot2d(self, plotData, plotAttributes):
        """Draw all 2d curves, then apply axis ranges, labels, lin/log scales and the grid."""
        self.plotAxis = self.figure.add_subplot(111)

        # numpy.float was a deprecated alias of the builtin float and was removed in
        # NumPy 1.24; query the float extremes via the builtin type instead.
        floatInfo = numpy.finfo(float)
        self.dataLimits = {'x': [floatInfo.max, floatInfo.min], 'y': [floatInfo.max, floatInfo.min]}
        for plotLegend in plotData.keys():
            if (not isinstance(plotData[plotLegend], (list, tuple))) or (len(plotData[plotLegend]) != 2):
                warnings.warn('No plot for legend "%s" due to incorrect dictionary value object type' % plotLegend)
                continue

            # Validate BEFORE calling min()/max(): on an empty sequence those raise
            # ValueError, which previously pre-empted the intended warnings below.
            xValues = plotData[plotLegend][0]
            if (not isinstance(xValues, (list, tuple))) or (len(xValues) == 0):
                warnings.warn('No plot for legend "%s" due to zero length xValue array' % plotLegend)
                continue
            yValues = plotData[plotLegend][1]
            if len(yValues) == 0:
                warnings.warn('No plot for legend "%s" due to zero length yValue array' % plotLegend)
                continue

            # Only plotted curves contribute to the overall data limits.
            self.dataLimits['x'] = [min(self.dataLimits['x'][0], min(xValues)),
                                    max(self.dataLimits['x'][1], max(xValues))]
            self.dataLimits['y'] = [min(self.dataLimits['y'][0], min(yValues)),
                                    max(self.dataLimits['y'][1], max(yValues))]
            self.plotAxis.plot(xValues, yValues, label=plotLegend)

        # Missing or empty range entries fall back to the data extremes.
        xleft = self.dataLimits['x'][0] if plotAttributes.get('xMin', '') == '' else float(plotAttributes['xMin'])
        xright = self.dataLimits['x'][1] if plotAttributes.get('xMax', '') == '' else float(plotAttributes['xMax'])
        self.plotAxis.set_xlim(xleft, xright)
        yleft = self.dataLimits['y'][0] if plotAttributes.get('yMin', '') == '' else float(plotAttributes['yMin'])
        yright = self.dataLimits['y'][1] if plotAttributes.get('yMax', '') == '' else float(plotAttributes['yMax'])
        self.plotAxis.set_ylim(yleft, yright)

        if plotAttributes.get('title', '') != '':
            self.plotAxis.set_title(plotAttributes['title'])
        self.plotAxis.set_xlabel(plotAttributes['xLabel'])
        self.plotAxis.set_ylabel(plotAttributes['yLabel'])

        # 'logs' packs the scale flags into one integer: bit 0 = x log, bit 1 = y log.
        logs = plotAttributes.get('logs', 0)
        xlog = logs % 2
        ylog = ( logs // 2 ) % 2
        linlog = {0: 'linear', 1: 'log'}
        self.plotAxis.set_xscale(linlog[xlog])
        self.plotAxis.set_yscale(linlog[ylog])

        self.plotAxis.tick_params(which='major', direction='in', bottom=True, top=True, left=True, right=True)
        self.plotAxis.tick_params(which='minor', bottom=False, top=False, left=False, right=False)
        self.gridOn = True
        self.plotAxis.grid(self.gridOn, linestyle='dashed')

    def plot3d(self, plotData, plotAttributes):
        """Draw a single surface plot with a jet colormap and a colorbar."""
        from matplotlib import cm as colorMap

        self.plotAxis = self.figure.add_subplot(111, projection='3d')

        assert len(plotData) == 1, '3d Plots limited to a single plot'
        plotLegend = [key for key in plotData.keys()][0]
        if (not isinstance(plotData[plotLegend], (list, tuple))) or (len(plotData[plotLegend]) != 3):
            warnings.warn('No plot for legend "%s" due to incorrect dictionary value object type' % plotLegend)

        # Warn about empty arrays BEFORE calling .min()/.max(), which fail on them.
        xValues = plotData[plotLegend][0]
        if len(xValues) == 0:
            warnings.warn('No plot for legend "%s" due to zero length xValue array' % plotLegend)
        self.dataLimits = {'x': [xValues.min(), xValues.max()]}
        yValues = plotData[plotLegend][1]
        if len(yValues) == 0:
            warnings.warn('No plot for legend "%s" due to zero length yValue array' % plotLegend)
        self.dataLimits['y'] = [yValues.min(), yValues.max()]
        zValues = plotData[plotLegend][2]
        if len(zValues) == 0:
            warnings.warn('No plot for legend "%s" due to zero length zValue array' % plotLegend)
        self.dataLimits['z'] = [zValues.min(), zValues.max()]

        surfacePlot = self.plotAxis.plot_surface(xValues, yValues, zValues, cmap=colorMap.jet, linewidth=0,
                                                 antialiased=False)

        self.plotAxis.set_xlabel(plotAttributes['xLabel'])
        self.plotAxis.set_ylabel(plotAttributes['yLabel'])
        self.plotAxis.set_zlabel(plotAttributes['zLabel'])
        self.gridOn = True
        self.plotAxis.grid(self.gridOn, linestyle='dashed')
        self.plotAxis.view_init(25, 15)
        self.plotFigure.colorbar(surfacePlot, shrink=0.5, aspect=10)

    def addLegend(self, draggableLegend):
        """
        Method to create plot legend.
        The variable self.legendDraggableNotScrollable is used to decide whether the legend should be inside or outside
        the plot region.
        If self.legendDraggableNotScrollable == True:
            - Legend is placed inside the plot region;
            - Plot region maintains the default size;
            - Depending on the value of the draggableLegend variable, the legend location is fixed
              (draggableLegend == False) or draggable (draggableLegend == True); and
            - Legend is never scrollable
        If self.legendDraggableNotScrollable == False:
            - Plot region is shrinked and legend is placed on the outside; and
            - Legend is allowed to be scrollable
        :param draggableLegend: Variable to set legend draggable (boolean) if self.legendDraggableNotScrollable is True
        :return: matplotlib.axes.legend object
        """
        if self.legendDraggableNotScrollable:
            if draggableLegend:
                # draggable legend
                legend = self.plotAxis.legend(loc='best')
                legend.set_draggable(True)
            else:
                # Re-create the legend pinned at its current (possibly user-dragged) position.
                transformation = self.legend.axes.transAxes.inverted()
                boundingBox = self.legend.get_bbox_to_anchor().transformed(transformation)
                location = self.legend.get_window_extent().transformed(transformation)
                legend = self.plotAxis.legend(loc=(location.x0, location.y0), bbox_to_anchor=boundingBox)
        else:
            # Shrink current axis by 20%
            box = self.plotAxis.get_position()
            self.plotAxis.set_position([box.x0, box.y0, box.width * self.percentPlotWidth, box.height])
            self.percentPlotWidth = 1.0  # ensure that axis is only shrinked once
            legend = self.plotAxis.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
            self.mpl_connect("scroll_event", self.legendScroll)

        return legend

    def redrawPlot(self):
        """
        Method to redraw plot and is normally called after a plot property is changed.
        """
        draggableLegend = self.legend.get_draggable()
        self.legend = self.addLegend(draggableLegend)
        self.plotFigure.canvas.draw_idle()

    def legendScroll(self, event):
        """
        Method to enable scrolling of the plot legend.
        Useful for cases with a large number of plots (currently implemented for cases with more than 10 plots) and
        based on a response to a stackoverflow question (
        see https://stackoverflow.com/questions/55863590/adding-scroll-button-to-matlibplot-axes-legend accessed on
        03/24/2020).
        :param event: Mouse scroll event (type matplotlib.backend_bases.MouseEvent).
        """
        # pixels to scroll per mousewheel event
        d = {"down": 30, "up": -30}
        if self.legend.contains(event):
            boundingBox = self.legend.get_bbox_to_anchor()
            boundingBox = Bbox.from_bounds(boundingBox.x0, boundingBox.y0 + d[event.button], boundingBox.width,
                                           boundingBox.height)
            transformation = self.legend.axes.transAxes.inverted()

            # limit scrolling to keep legend visible
            windowExtent = self.legend.get_window_extent().transformed(transformation)
            legendScrollable = (event.button == 'up' and windowExtent.y1 >= 1) or \
                               (event.button == 'down' and windowExtent.y0 < 0)
            if legendScrollable:
                self.legend.set_bbox_to_anchor(boundingBox.transformed(transformation))
                self.plotFigure.canvas.draw_idle()
class DialogWindow(QDialog):
    """
    Auxiliary dialog holding the user controls for plot attributes.

    Builds a tabbed dialog: one tab with overall plot options (font size,
    labels, data ranges, legend toggles) and — for 2d plots only — a second
    tab with per-dataset options (display, label, line color/type/width,
    marker type/size).

    :param width: Window width (float).
    :param height: Window height (float).
    :param plotAttributes: Dictionary with the plot title and axis labels as entries.
    :param plotObject: Plot object giving children access to the figure and axis handles.
    :param plotType: Variable indicating the plot type (i.e. '2d' or '3d') (string).
    :param parent: The parent window (MainWindow).
    """

    def __init__(self, width, height, plotAttributes, plotObject, plotType, parent=None):
        super().__init__(parent)
        self.parentWindow = parent
        self.setMinimumSize(QSize(width, height))
        self.setWindowTitle('Fudge Interactive Plot Controls')

        tabs = QTabWidget()
        tabs.addTab(OverallPlotOptions(plotObject, plotAttributes, plotType), 'Overall Plot Options')
        if plotType == '2d':
            tabs.addTab(IndividualPlotOptions(plotObject), 'Individual DataSet Options')

        layout = QVBoxLayout()
        layout.addWidget(tabs)
        self.setLayout(layout)

    def closeEvent(self, event):
        """Quit the whole application when the controls dialog is closed."""
        self.parentWindow.applicationObject.quit()
class OverallPlotOptions(QWidget):
"""
Class derived from PyQt5.QtWidgets.QWidget to generate the controls for the overall plot options.
Include controls for the font size, labels, data ranges and the ability to toggle the legend display.
:param plotObject: Plot object to be passed on to the children classes to access the plot figure and axis handles.
"""
def __init__(self, plotObject, plotAttributes, plotType):
self.plotInstance = plotObject
self.axisHandle = plotObject.plotAxis
self.figureHandle = plotObject.plotFigure
super().__init__()
self.userUpdatedRange = False
# Font size
yCoordinate = 20
self.fontSizeLabel = self.createTextBoxLabel(' font size = ', yCoordinate)
self.fontSize = self.createTextBox('%d' % self.axisHandle.title.get_fontsize(), yCoordinate, 100, self.updateFontSize,
setValidator=QIntValidator(2, 60))
# Title
deltaYCoordinate = 30
yCoordinate += deltaYCoordinate
self.plotTitleLabel = self.createTextBoxLabel(' title = ', yCoordinate)
self.plotTitle = self.createTextBox(self.axisHandle.get_title(), yCoordinate, 300, self.updatePlotTitle)
# X axislabel
yCoordinate += deltaYCoordinate
self.xAxisLabelLabel = self.createTextBoxLabel(' x label = ', yCoordinate)
self.xAxisLabel = self.createTextBox(self.axisHandle.get_xlabel(), yCoordinate, 300, self.updateXAxisLabel)
# Y axislabel
deltaYCoordinate = 25
yCoordinate += deltaYCoordinate
self.yAxisLabelLabel = self.createTextBoxLabel(' y label = ', yCoordinate)
self.yAxisLabel = self.createTextBox(self.axisHandle.get_ylabel(), yCoordinate, 300, self.updateYAxisLabel)
deltaYCoordinateZLabel = deltaYCoordinate
yCoordinateZLabel = yCoordinate + deltaYCoordinateZLabel
# X range
deltaYCoordinate = 35
yCoordinate += deltaYCoordinate if plotType == '2d' else deltaYCoordinate + deltaYCoordinateZLabel
self.xRangeLabel = self.createTextBoxLabel(' x range = ', yCoordinate)
self.xRangeLower = self.createTextBox(plotAttributes['xMin'], yCoordinate, 100, self.updateXRange )
self.xRangeUpper = self.createTextBox(plotAttributes['xMax'], yCoordinate, 100, self.updateXRange, moveX=205 )
self.xLog = self.createCheckBox('xlog', 350, yCoordinate, self.axisHandle.get_xscale() == 'log', self.toggleXScale)
self.xrange = plotObject.dataLimits['x']
# update textbox if zoom changes
self.axisHandle.callbacks.connect('xlim_changed', self.onXLimitChange)
# Y range
deltaYCoordinate = 25
yCoordinate += deltaYCoordinate
self.yRangeLabel = self.createTextBoxLabel(' y range = ', yCoordinate)
self.yRangeLower = self.createTextBox(plotAttributes['yMin'], yCoordinate, 100, self.updateYRange )
self.yRangeUpper = self.createTextBox(plotAttributes['yMax'], yCoordinate, 100, self.updateYRange, moveX=205 )
self.yLog = self.createCheckBox('ylog', 350, yCoordinate, self.axisHandle.get_yscale() == 'log',
self.toggleYScale)
self.yrange = plotObject.dataLimits['y']
# update textbox if zoom changes
self.axisHandle.callbacks.connect('ylim_changed', self.onYLimitChange)
# Grid checkbox
self.gridCheckBox = self.createCheckBox('Grid Lines', 600, 80, self.plotInstance.gridOn, self.toggleGrid)
if plotType == '2d':
self.legendDraggableNotScrollable = plotObject.legendDraggableNotScrollable
# Legend checkbox
self.legendVisibleCheckBox = \
self.createCheckBox('Show Legend', 600, 20, self.plotInstance.legend.get_visible(), self.toggleLegend)
# checkbox to fix legend location
if self.legendDraggableNotScrollable:
self.legendLocationFixedCheckBox = \
self.createCheckBox('Fix Legend Location', 600, 40, not self.legendDraggableNotScrollable,
self.fixLegendLocation)
else:
# Z axislabel
self.zAxisLabelLabel = self.createTextBoxLabel(' z label = ', yCoordinateZLabel)
self.zAxisLabel = self.createTextBox(self.axisHandle.get_zlabel(), yCoordinateZLabel, 300,
self.updateZAxisLabel)
# dropdown list for plot type
# self.addDropDownList(100, 'Plot Type', Surface Plot', )
def createTextBoxLabel(self, labelText, moveY, moveX=10):
"""
General method to create a PyQt5 label, i.e. PyQt5.QtWidgets.QLabel
:param labelText: Text to be placed in the label (string).
:param moveY: Vertical label location (float).
:param moveX: Horizontal label location (float).
"""
labelBox = QLabel(self)
labelBox.move(moveX, moveY)
labelBox.setText(labelText)
return labelBox
def createTextBox(self, defaultText, moveY, sizeX, updateMethod, setValidator=None, moveX=100, sizeY=20):
"""
General method to create a PyQt5 text box, i.e. PyQt5.QtWidgets.QLineEdit
:param defaultText: Initial text to be placed in text box (string).
:param moveY: Vertical textbox location (float).
:param sizeX: Horizontal textbox size (float).
:param updateMethod: Method to use after ENTER is pressed in textbox.
:param setValidator: Input to validate input text.
:param moveX: Horizontal textbox location (float).
:param sizeY: Vertical textbox size (float).
"""
textBox = QLineEdit(self)
if setValidator is not None:
textBox.setValidator(setValidator)
textBox.move(moveX, moveY)
textBox.resize(sizeX, sizeY)
textBox.setText(defaultText)
textBox.returnPressed.connect(updateMethod)
return textBox
def createCheckBox(self, label, moveX, moveY, checkRequired, updateMethod):
"""
General method to create a PyQt5 checkbox, i.e. PyQt5.QtWidgets.QCheckBox
:param label: Checkbox label (string).
:param moveX: Horizontal checkbox size (float).
:param moveY: Vertical checkbox size (float).
:param checkRequired: Default check state (bool).
:param updateMethod: Method to use after checkbox state change.
"""
checkBox = QCheckBox(label, self)
checkBox.move(moveX, moveY)
if checkRequired:
if not checkBox.isChecked():
checkBox.toggle()
else:
if checkBox.isChecked():
checkBox.toggle()
checkBox.stateChanged.connect(updateMethod)
return checkBox
    def updateFontSize(self):
        """
        Method to update font size for the plot title, x-axis label, y-axis labels, x-tick labels, and y-tick labels.

        Reads the new size from the font-size textbox and applies it to every
        text element discovered on the axes handle via reflection.
        """
        newFontSize = self.fontSize.text()
        # Collect the title plus the axis label objects; the regex over dir()
        # picks up 'zaxis' automatically when the axes are 3d.
        axisList = [self.axisHandle.title]
        axisList += [getattr(self.axisHandle, axisObject).label for axisObject in dir(self.axisHandle)
                     if re.match('^[xyz]axis$', axisObject)]
        # Append the tick labels returned by the get_?ticklabels() accessors.
        for axisObject in dir(self.axisHandle):
            if re.match('^get_.ticklabels$', axisObject):
                axisList += getattr(self.axisHandle, axisObject)()
        for axisItem in axisList:
            axisItem.set_fontsize(newFontSize)
        self.axisHandle.axes.tick_params(labelsize=newFontSize)
        # The offset (scientific-notation multiplier) text is not reached by
        # the loops above, so it is resized per axis explicitly.
        self.axisHandle.xaxis.get_offset_text().set_fontsize(newFontSize)
        self.axisHandle.yaxis.get_offset_text().set_fontsize(newFontSize)
        if hasattr(self.axisHandle, 'zaxis'):
            self.axisHandle.zaxis.get_offset_text().set_fontsize(newFontSize)
        self.figureHandle.canvas.draw_idle()
def updateXAxisLabel(self):
"""
Method x-axis label text.
"""
newLabel = self.xAxisLabel.text()
self.axisHandle.set_xlabel(newLabel)
self.axisHandle.xaxis.label.set_fontsize(self.fontSize.text())
self.figureHandle.canvas.draw_idle()
def updateYAxisLabel(self):
"""
Method y-axis label text
"""
newLabel = self.yAxisLabel.text()
self.axisHandle.set_ylabel(newLabel)
self.axisHandle.yaxis.label.set_fontsize(self.fontSize.text())
self.figureHandle.canvas.draw_idle()
def updateZAxisLabel(self):
"""
Method y-axis label text
"""
newLabel = self.zAxisLabel.text()
self.axisHandle.set_zlabel(newLabel)
self.axisHandle.zaxis.label.set_fontsize(self.fontSize.text())
self.figureHandle.canvas.draw_idle()
def updatePlotTitle(self):
"""
Method to update main plot title.
"""
newTitle = self.plotTitle.text()
self.axisHandle.set_title(newTitle)
self.axisHandle.title.set_fontsize(self.fontSize.text())
self.figureHandle.canvas.draw_idle()
def onXLimitChange(self, axes):
"""
Method to update the x-axis limits checkbox when the actual axis limit changes via panning or zooming.
:param axes: The matplotlib axes variable.
"""
if self.userUpdatedRange:
return
self.userUpdatedRange = False
xLimits = axes.get_xlim()
self.xRangeLower.setText('%G' % xLimits[0])
self.xRangeUpper.setText('%G' % xLimits[1])
def updateXRange(self):
"""
Method to update x-axis range.
"""
self.userUpdatedRange = True
xrange = [self.xRangeLower.text(), self.xRangeUpper.text()]
xrange[0] = float(xrange[0]) if xrange[0] != '*' else self.xrange[0]
xrange[1] = float(xrange[1]) if xrange[1] != '*' else self.xrange[1]
self.axisHandle.set_xlim(xrange)
self.figureHandle.canvas.draw_idle()
def onYLimitChange(self, axes):
"""
Method to update the y-axis limits checkbox when the actual axis limit changes via panning or zooming.
:param axes: The matplotlib axes variable.
"""
if self.userUpdatedRange:
self.userUpdatedRange = False
return
yLimits = axes.get_ylim()
self.yRangeLower.setText('%G' % yLimits[0])
self.yRangeUpper.setText('%G' % yLimits[1])
def updateYRange(self):
"""
Method to update y-axis range.
"""
self.userUpdatedRange = True
yrange = [self.yRangeLower.text(), self.yRangeUpper.text()]
yrange[0] = float(yrange[0]) if yrange[0] != '*' else self.yrange[0]
yrange[1] = float(yrange[1]) if yrange[1] != '*' else self.yrange[1]
self.axisHandle.set_ylim(yrange)
self.figureHandle.canvas.draw_idle()
def toggleXScale(self):
"""
Method to toggle the x-axis between a linear and log scale.
"""
if self.xLog.isChecked():
if self.axisHandle.get_xscale() != 'log':
self.axisHandle.set_xscale('log')
elif self.axisHandle.get_xscale() != 'linear':
self.axisHandle.set_xscale('linear')
self.figureHandle.canvas.draw_idle()
def toggleYScale(self):
"""
Method to toggle the y-axis between a linear and log scale.
"""
if self.yLog.isChecked():
if self.axisHandle.get_yscale() != 'log':
self.axisHandle.set_yscale('log')
elif self.axisHandle.get_yscale() != 'linear':
self.axisHandle.set_yscale('linear')
self.figureHandle.canvas.draw_idle()
def toggleLegend(self):
"""
Method to toggle legend visibility.
"""
self.plotInstance.legend.set_visible(self.legendVisibleCheckBox.isChecked())
self.figureHandle.canvas.draw_idle()
def redrawPlot(self):
"""
Local method that calls the plotCanvas.redrawPlot method.
"""
self.plotInstance.redrawPlot()
def fixLegendLocation(self):
"""
Method to fix draggable legend location
"""
checkBoxState = self.legendLocationFixedCheckBox.isChecked()
self.plotInstance.legend.set_draggable(not checkBoxState)
def toggleGrid(self):
"""
Method to toggle plot axes grid.
"""
if self.gridCheckBox.isChecked():
self.plotInstance.gridOn = True
self.axisHandle.grid(True)
else:
self.plotInstance.gridOn = False
self.axisHandle.grid(False)
self.figureHandle.canvas.draw_idle()
class IndividualPlotOptions(QWidget):
    """
    Class derived from PyQt5.QtWidgets.QWidget to generate the controls for the individual plot options.
    Includes controls for the individual plot display; label; line color, style and width; as well as the marker type
    and size. This tab is scrollable to allow for the control of a large number of plots.
    :param plotObject: Plot object used to access the plot figure and axis handles.
    """
    def __init__(self, plotObject):
        self.plotInstance = plotObject
        self.axisHandle = plotObject.plotAxis
        self.figureHandle = plotObject.plotFigure
        super().__init__()
        self.selectionNotAvailable = 'N/A'
        # Map lower-case color hex value -> short tableau color name
        # (e.g. '#1f77b4' -> 'blue'), used by the color drop-downs.
        self.colorDictionary = dict([(colorHex.lower(), colorName.split(':')[-1])
                                     for colorName, colorHex in matplotlib.colors.TABLEAU_COLORS.items()])
        self.markerStyleDictionary = \
            OrderedDict([('None', 'None'), ('.', 'Point'), ('o', 'Circle'), ('v', 'Triangle Down'),
                         ('^', 'Triangle Up'), ('<', 'Triangle Left'), ('>', 'Triangle Right'), ('s', 'Square'),
                         ('p', 'Pentagon'), ('*', 'Star'), ('+', 'Plus'), ('x', 'X'), ('D', 'Diamond')])
        self.lineStyleDictionary = OrderedDict([('-', 'Solid'), ('--', 'Dashed'), ('-.', 'Dash Dot'), (':', 'Dotted'),
                                                ('', 'None')])
        availableColors = [self.colorDictionary[hexKey] for hexKey in
                           pyplot.rcParams['axes.prop_cycle'].by_key()['color']]
        # widget geometry constants
        self.xSize = 100
        self.ySize = 20
        self.xSizeLabel = 2 * self.xSize
        self.xSizeLineStyle = 1.5 * self.xSize
        self.xSizeLineWidth = 0.25 * self.xSize
        formLayout = self.headings()
        # One row of widgets per plotted line. Every widget's object name
        # starts with a zero-padded 3-digit plot index that the callbacks
        # parse back out via int(objectName[:3]).
        self.widgetList = []
        for i in range(len(self.axisHandle.lines)):
            irow = i + 1
            # row variables
            legendLabel = self.axisHandle.legend_.legendHandles[i].get_label()
            rowName = '%3.3d%s' % (i, legendLabel)
            # checkbox: toggle plot selection
            checkBox = self.addCheckBox(rowName, self.toggleSinglePlotVisibility)
            formLayout.addWidget(checkBox, irow, 0)
            self.widgetList.append(checkBox)
            # label: plot label used in legend
            widget = self.addTextBox(self.xSizeLabel, rowName, legendLabel, self.changePlotLabel)
            formLayout.addWidget(widget, irow, 1)
            self.widgetList.append(widget)
            # dropdown list for plot line color selection
            comboBox = self.addLineColorDropDown(rowName, self.axisHandle.lines[i].get_color(), availableColors)
            formLayout.addWidget(comboBox, irow, 2)
            self.widgetList.append(comboBox)
            # dropdown list for plot line style selection
            comboBox = self.addDropDownList(self.xSizeLineStyle, rowName, self.axisHandle.lines[i].get_linestyle(),
                                            self.lineStyleDictionary, self.changeLineStyle, irow, 'line style')
            formLayout.addWidget(comboBox, irow, 3)
            self.widgetList.append(comboBox)
            # textbox for plot linewidth
            textBox = self.addTextBox(self.xSizeLineWidth, rowName, '%.2f' % self.axisHandle.lines[i].get_linewidth(),
                                      self.changeLineWidth, setValidator=QDoubleValidator(0.0, 20.0, 3))
            formLayout.addWidget(textBox, irow, 4)
            self.widgetList.append(textBox)
            # dropdown list for marker style
            comboBox = self.addDropDownList(self.xSizeLineStyle, rowName, self.axisHandle.lines[i].get_marker(),
                                            self.markerStyleDictionary, self.changeMarkerStyle, irow, 'marker style')
            formLayout.addWidget(comboBox, irow, 5)
            self.widgetList.append(comboBox)
            # textbox for plot marker size
            textBox = self.addTextBox(self.xSizeLineWidth, rowName, '%.2f' % self.axisHandle.lines[i].get_markersize(),
                                      self.changeMarkerSize, setValidator=QDoubleValidator(0.0, 20.0, 3))
            formLayout.addWidget(textBox, irow, 6)
            self.widgetList.append(textBox)
        groupBox = QGroupBox()
        # The label column absorbs most of the horizontal stretch.
        formLayout.setColumnStretch(1, 4)
        formLayout.setColumnStretch(2, 1)
        formLayout.setColumnStretch(3, 1)
        formLayout.setColumnStretch(4, 1)
        formLayout.setColumnStretch(5, 1)
        formLayout.setColumnStretch(6, 1)
        groupBox.setLayout(formLayout)
        scroll = QScrollArea()
        scroll.setWidget(groupBox)
        scroll.setWidgetResizable(True)
        layout = QVBoxLayout(self)
        layout.addWidget(scroll)
    def redrawPlot(self):
        """
        Local method that calls the plotCanvas.redrawPlot method.
        """
        self.plotInstance.redrawPlot()
    def headings(self, skip=False):
        """
        Method to create the grid layout and, unless skipped, add the heading row above the individual plot options.
        :param skip: Option to skip adding the heading row (bool, default False so headings are added).
        """
        formLayout = QGridLayout()
        if skip:
            return formLayout
        formLayout.addWidget(self.addCheckBox('Label', self.toggleAllPlotVisibility), 0, 0)
        formLayout.addWidget(self.addLabel('Line Labels'), 0, 1)
        formLayout.addWidget(self.addLabel('Line Color'), 0, 2)
        formLayout.addWidget(self.addLabel('Line Style'), 0, 3)
        formLayout.addWidget(self.addLabel('Line Width'), 0, 4)
        formLayout.addWidget(self.addLabel('Marker Style'), 0, 5)
        formLayout.addWidget(self.addLabel('Marker Size'), 0, 6)
        return formLayout
    def addLabel(self, labelText):
        """
        General method to create a PyQt5 label, i.e. PyQt5.QtWidgets.QLabel
        :param labelText: Text to be placed in the label (string).
        """
        labelBox = QLabel(self)
        labelBox.setText(labelText)
        labelBox.setAlignment(Qt.AlignCenter)
        labelFont = QFont()
        labelFont.setBold(True)
        labelBox.setFont(labelFont)
        return labelBox
    def addCheckBox(self, objectName, connectMethod):
        """
        General method to create a PyQt5 checkbox, i.e. PyQt5.QtWidgets.QCheckBox
        :param objectName: Checkbox object name (string).
        :param connectMethod: Method to use after checkbox state change.
        """
        # noinspection PyArgumentList
        checkBox = QCheckBox('', self, objectName=objectName)
        # Checkboxes start checked (all plots visible); connect afterwards so
        # the initial state fires no callback.
        checkBox.setChecked(True)
        checkBox.stateChanged.connect(connectMethod)
        return checkBox
    def addTextBox(self, xSize, objectName, defaultText, connectMethod=None, setValidator=None):
        """
        General method to create a PyQt5 textbox, i.e. PyQt5.QtWidgets.QLineEdit
        :param xSize: Horizontal textbox size (float).
        :param objectName: Textbox object name (string).
        :param defaultText: Initial text to be placed in textbox (string).
        :param connectMethod: Method to use after ENTER is pressed in textbox.
        :param setValidator: Input to validate input text.
        """
        # noinspection PyArgumentList
        textBox = QLineEdit(self, objectName=objectName)
        textBox.resize(xSize, self.ySize)
        textBox.setText(defaultText)
        if setValidator is not None:
            textBox.setValidator(setValidator)
        if connectMethod is not None:
            textBox.returnPressed.connect(connectMethod)
        return textBox
    def addDropDownList(self, xSize, objectName, defaultValue, itemDictionary, connectMethod, plotIndex, plotAttribute):
        """
        General method to create a PyQt5 drop-down list, i.e. PyQt5.QtWidgets.QComboBox
        :param xSize: Horizontal drop-down list size (float).
        :param objectName: Drop-down list object name (string).
        :param defaultValue: Default selected value for drop-down list (string).
        :param itemDictionary: Ordered Dictionary with possible drop-down values.
        :param connectMethod: Method to use after selected drop-down value changes.
        :param plotIndex: Index of plot associated with the drop-down list.
        :param plotAttribute: Plot attribute associated with the drop-down list (used in the warning message).
        """
        # noinspection PyArgumentList
        comboBox = QComboBox(self, objectName=objectName)
        for dropDownItem in itemDictionary.values():
            comboBox.addItem(dropDownItem)
        indexComboBox = comboBox.findText(itemDictionary[defaultValue], Qt.MatchFixedString)
        if indexComboBox >= 0:
            comboBox.setCurrentIndex(indexComboBox)
        else:
            # The plot's current value is not offered in the list: show 'N/A'
            # with no selection instead of silently selecting something else.
            comboBox.addItem(self.selectionNotAvailable)
            comboBox.setCurrentIndex(-1)
            warnings.warn('Default %s for plot number %d may be used but is intentionally not made available in the '
                          'drop-down list.' % (plotAttribute, plotIndex))
        comboBox.resize(xSize, self.ySize)
        # noinspection PyUnresolvedReferences
        comboBox.currentIndexChanged.connect(connectMethod)
        return comboBox
    def addLineColorDropDown(self, objectName, newColor, availableColors):
        """
        Method to add drop-down list for plot line colors.
        In addition to the color names, the corresponding backgrounds are also changed to the corresponding colors.
        :param objectName: Drop-down list object name (string).
        :param newColor: Currently selected color hex value for the drop-down list (string).
        :param availableColors: List of available colors.
        """
        # noinspection PyArgumentList
        comboBox = QComboBox(self, objectName=objectName)
        for colorIndex in range(len(availableColors)):
            comboBox.addItem(availableColors[colorIndex])
            # selection background color: requires self.applicationObject.setStyle("fusion")
            comboBox.model().item(colorIndex).setBackground(QColor(availableColors[colorIndex]))
        indexComboBox = comboBox.findText(self.colorDictionary[newColor], Qt.MatchFixedString)
        if indexComboBox >= 0:
            comboBox.setCurrentIndex(indexComboBox)
            colors = comboBox.model().item(indexComboBox).background().color()
            palette = comboBox.palette()
            palette.setColor(QPalette.Button, colors)
            comboBox.setPalette(palette)
        comboBox.resize(self.xSize, self.ySize)
        # noinspection PyUnresolvedReferences
        comboBox.currentIndexChanged.connect(self.changeLineColor)
        return comboBox
    def toggleAllPlotVisibility(self):
        """
        Method to toggle the visibility of all plots (either all visible or all invisible)
        When all plots are made invisible the warning 'No handles with labels found to put in legend.'
        is written to the terminal.
        """
        checkBox = self.sender()
        checkBoxName = checkBox.objectName()
        checkBoxState = Qt.Checked if checkBox.isChecked() else Qt.Unchecked
        # Propagate the master checkbox state to every per-plot checkbox;
        # each setCheckState triggers toggleSinglePlotVisibility in turn.
        for widget in self.widgetList:
            if isinstance(widget, QCheckBox) and widget.objectName() != checkBoxName:
                widget.setCheckState(checkBoxState)
    def toggleSinglePlotVisibility(self):
        """
        Method to toggle the visibility of individual plots.
        """
        checkBox = self.sender()
        checkBoxName = checkBox.objectName()
        # First 3 characters of the object name encode the plot index.
        plotIndex = int(checkBoxName[:3])
        if checkBox.isChecked():
            self.axisHandle.lines[plotIndex].set_visible(True)
            self.axisHandle.lines[plotIndex].set_label(checkBoxName[3:])
        else:
            self.axisHandle.lines[plotIndex].set_visible(False)
            # '_nolegend_' removes the entry from the legend as well.
            self.axisHandle.lines[plotIndex].set_label('_nolegend_')
        self.redrawPlot()
    def changePlotLabel(self):
        """
        Method to update the label of an individual plot after triggering of a UI event.
        This method is typically called after ENTER is pressed in the corresponding textbox.
        """
        textBox = self.sender()
        textBoxContent = textBox.text()
        textBoxName = textBox.objectName()
        # Keep the index prefix, replace the label part of the object name.
        newTextBoxName = textBoxName[:3] + textBoxContent
        for widget in self.widgetList:
            if widget.objectName() == textBoxName:
                widget.setObjectName(newTextBoxName)
        plotIndex = int(textBoxName[:3])
        # Do not resurrect a legend entry for a hidden plot.
        if self.axisHandle.lines[plotIndex].get_label() != '_nolegend_':
            self.axisHandle.lines[plotIndex].set_label(textBoxContent)
            self.redrawPlot()
    def changeLineColor(self):
        """
        Method to update the line color of an individual plot after triggering of a UI event.
        This method is called after a new color is selected on the corresponding drop-down list.
        """
        comboBox = self.sender()
        objectName = comboBox.objectName()
        plotIndex = int(objectName[:3])
        colorName = comboBox.currentText()
        # Reverse lookup: color name back to its hex key.
        colorHex = [x[0] for x in self.colorDictionary.items() if x[1] == colorName][0]
        self.axisHandle.lines[plotIndex].set_color(colorHex)
        self.redrawPlot()
        # selection background color: requires self.applicationObject.setStyle("fusion")
        selectedIndex = comboBox.currentIndex()
        colors = comboBox.model().item(selectedIndex).background().color()
        palette = comboBox.palette()
        palette.setColor(QPalette.Button, colors)
        comboBox.setPalette(palette)
    def changeLineStyle(self):
        """
        Method to update the line style of an individual plot after triggering of a UI event.
        This method is called after a new line style is selected on the corresponding drop-down list.
        """
        comboBox = self.sender()
        objectName = comboBox.objectName()
        plotIndex = int(objectName[:3])
        # Reverse lookup: displayed style name back to the matplotlib code.
        lineStyle = [styleEntry[0] for styleEntry in self.lineStyleDictionary.items()
                     if styleEntry[1] == comboBox.currentText()][0]
        self.axisHandle.lines[plotIndex].set_linestyle(lineStyle)
        self.redrawPlot()
    def changeLineWidth(self):
        """
        Method to update the line width of an individual plot after triggering of a UI event.
        This method is typically called after ENTER is pressed in the corresponding textbox.
        """
        textBox = self.sender()
        objectName = textBox.objectName()
        plotIndex = int(objectName[:3])
        # Convert explicitly: set_linewidth expects a float, and the
        # QDoubleValidator guarantees the text is numeric.
        self.axisHandle.lines[plotIndex].set_linewidth(float(textBox.text()))
        self.redrawPlot()
    def changeMarkerStyle(self):
        """
        Method to update the marker style of an individual plot after triggering of a UI event.
        This method is called after a new marker style is selected on the corresponding drop-down list.
        """
        comboBox = self.sender()
        objectName = comboBox.objectName()
        plotIndex = int(objectName[:3])
        # Reverse lookup: displayed marker name back to the matplotlib code.
        markerStyle = [styleEntry[0] for styleEntry in self.markerStyleDictionary.items()
                       if styleEntry[1] == comboBox.currentText()][0]
        self.axisHandle.lines[plotIndex].set_marker(markerStyle)
        self.redrawPlot()
    def changeMarkerSize(self):
        """
        Method to update the marker size of an individual plot after triggering of a UI event.
        This method is typically called after ENTER is pressed in the corresponding textbox.
        """
        textBox = self.sender()
        objectName = textBox.objectName()
        plotIndex = int(objectName[:3])
        # Convert explicitly: set_markersize expects a float, and the
        # QDoubleValidator guarantees the text is numeric.
        self.axisHandle.lines[plotIndex].set_markersize(float(textBox.text()))
        self.redrawPlot()
| 41.128571 | 126 | 0.648923 |
24b3ba1e898598f18a5ee0c19432e3d8abc3b576 | 212 | py | Python | metaphor/bigquery/__main__.py | MetaphorData/connectors | dd0e2092f791196dd9becce7da28923a4875370d | [
"Apache-2.0"
] | 5 | 2021-08-24T17:46:48.000Z | 2022-01-21T03:42:31.000Z | metaphor/bigquery/__main__.py | MetaphorData/connectors | dd0e2092f791196dd9becce7da28923a4875370d | [
"Apache-2.0"
] | 142 | 2021-09-02T22:16:54.000Z | 2022-03-31T15:23:34.000Z | metaphor/bigquery/__main__.py | MetaphorData/connectors | dd0e2092f791196dd9becce7da28923a4875370d | [
"Apache-2.0"
] | null | null | null | from metaphor.common.cli import cli_main
from .extractor import BigQueryExtractor, BigQueryRunConfig
if __name__ == "__main__":
cli_main("BigQuery metadata extractor", BigQueryRunConfig, BigQueryExtractor)
| 30.285714 | 81 | 0.816038 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.