Dataset schema (one row per source file; "nullable" marks columns that may be null):
- hexsha: string, length 40
- size: int64, range 3 to 1.03M
- ext: string, 10 distinct values
- lang: string, 1 distinct value
- max_stars_repo_path: string, length 3 to 972
- max_stars_repo_name: string, length 6 to 130
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, range 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 3 to 972
- max_issues_repo_name: string, length 6 to 130
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, range 1 to 116k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 3 to 972
- max_forks_repo_name: string, length 6 to 130
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, range 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 3 to 1.03M
- avg_line_length: float64, range 1.13 to 941k
- max_line_length: int64, range 2 to 941k
- alphanum_fraction: float64, range 0 to 1
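To make the schema concrete, here is a minimal sketch of reading rows with this layout via the Hugging Face `datasets` library; the dataset path is a placeholder (an assumption, not the actual source of this dump), and only a handful of the columns above are touched.

```python
# Minimal sketch, assuming the rows are published as a Hugging Face dataset split.
# "some-org/some-code-dataset" is a placeholder name, not a real dataset path.
from datasets import load_dataset

ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)

for row in ds:
    # Identification fields
    print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])
    # Popularity counters are nullable; treat None as zero
    stars = row["max_stars_count"] or 0
    # The file body plus a few precomputed statistics
    source = row["content"]
    print(len(source), stars, row["avg_line_length"], row["alphanum_fraction"])
    break  # inspect only the first record in this sketch
```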
hexsha: 7519c4ab911ea14338546376a2c65e817f2a158d | size: 2,035 | ext: py | lang: Python
max_stars: scripts/ann_architectures/imagenet/train_vgg16.py @ embeddedlabsiu/snn_exploration_spinnaker (head 5cde5f4c705719058bc0b913f2353eac3bd580b3), licenses ["MIT"], count 1, events 2020-06-05T13:04:29.000Z to 2020-06-05T13:04:29.000Z
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
content:
import os
import json
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.imagenet_utils import preprocess_input
from keras.applications.vgg16 import VGG16
from keras.callbacks import TensorBoard
from keras.metrics import top_k_categorical_accuracy
from keras.optimizers import SGD
data_path = '/home/rbodo/.snntoolbox/Datasets/imagenet'
train_path = os.path.join(data_path, 'training')
test_path = os.path.join(data_path, 'validation')
class_idx_path = os.path.join(data_path, 'imagenet_class_index_1000.json')
log_path = '/home/rbodo/.snntoolbox/data/imagenet/vgg16_trained2'
num_classes = 100
print("Instantiating model...")
model = VGG16(weights=None, classes=num_classes)
sgd = SGD(0.01)
model.compile(sgd, 'categorical_crossentropy',
['accuracy']) # , top_k_categorical_accuracy])
# Get dataset
print("Loading dataset...")
class_idx = json.load(open(class_idx_path, "r"))
classes = [class_idx[str(idx)][0] for idx in range(len(class_idx))]
classes = classes[:num_classes]
target_size = (224, 224)
batch_size = 1
nb_train_samples = 129395 # 1281167
nb_train_steps = nb_train_samples // batch_size
nb_val_samples = 50000  # 50000
nb_val_steps = nb_val_samples // batch_size
nb_epoch = 10
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
trainflow = datagen.flow_from_directory(
train_path, target_size, classes=classes, batch_size=batch_size)
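# Pull the entire validation split as a single in-memory batch so it can be passed
# directly as validation_data to fit_generator below.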
testflow = datagen.flow_from_directory(
test_path, target_size, classes=classes, batch_size=nb_val_samples)
testdata = testflow.next()
# print("Evaluating initial model...")
# score = model.evaluate_generator(testflow, nb_val_steps)
# print("Validation accuracy: {} top-1, {} top-5".format(score[1], score[2]))
# 0.15% and 0.54%
print("Training...")
gradients = TensorBoard(log_path + '/logs_32bit', 1, write_grads=True)
model.fit_generator(trainflow, nb_train_steps, nb_epoch, verbose=1,
validation_data=testdata, callbacks=[gradients])
model.save(os.path.join(log_path, 'vgg16.h5'))
avg_line_length: 35.086207 | max_line_length: 77 | alphanum_fraction: 0.770516
hexsha: f667e94c9288e56eb047b3ef17233efafcaddfce | size: 917 | ext: py | lang: Python
max_stars: main.py @ kopeadri/BSI-Project (head 28484314b52aaaf85351c9e24370db06be8bb67d), licenses ["BSD-2-Clause"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
content:
import os
import argparse
import sys
from Analyzer import Analyzer
from ArgParser import ArgParser
def main():
# arguments = sys.argv[1:] # omit program name
arguments = ['-f', 'Resources\Pcap2.pcap']
# arguments = ['-f', 'Resources\wireshark3.pcap']
# arguments = ['-c', '500']
# arguments = ['--help']
    # arguments = ['-f', 'Resources\Pcap2.pcap', '-c', '50']  # in this case only the -c argument is taken into account
    # arguments = ['-c', '-500']  # prints a message that the count cannot be <= 0
    # arguments = ['-f', 'Resources\Pcap45.pcap']  # prints a message that the path does not exist
    # arguments = []  # prints the "Arguments not provided!" message
arg_parser = ArgParser()
args = arg_parser.parse_arguments(arguments)
arg_parser.print_args(args)
analyzer = Analyzer(args.file, args.capture_packets)
analyzer.run()
if __name__ == "__main__":
print("AnalyzerApp START")
main()
avg_line_length: 28.65625 | max_line_length: 118 | alphanum_fraction: 0.649945
hexsha: 7efd2e8974664dfda568c0c18662764deb6917df | size: 912 | ext: py | lang: Python
max_stars: apps/app_user/migrations/0002_userlogin.py @ ocswor/clendar_backend (head 810835941d371ee4ba13bee3dd0c1be130bee7d2), licenses ["MIT"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
content:
# Generated by Django 2.1.2 on 2019-12-29 12:16
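# Translations of the Chinese verbose_name labels below (kept verbatim because they are
# user-facing model labels, not identifiers): '时间粒度' = 'time granularity',
# '登录时间' = 'login time', '用户' = 'user', '用户登录' = 'user login'.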
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_user', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserLogin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_grading', models.CharField(max_length=32, verbose_name='时间粒度')),
('login_time', models.DateTimeField(verbose_name='登录时间')),
('user',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_user.User', verbose_name='用户')),
],
options={
'verbose_name': '用户登录',
'verbose_name_plural': '用户登录',
},
),
]
avg_line_length: 32.571429 | max_line_length: 120 | alphanum_fraction: 0.569079
hexsha: c3b63d6f836ae00b1b189fc6626a6a1356931c99 | size: 6,565 | ext: py | lang: Python
max_stars: tests/bootstrap.py @ hnakamur/trafficserver-deb (head 60efe9253292f7a4fb8c37430a12ce9056190711), licenses ["Apache-2.0"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
content:
#!/usr/bin/env python
# this script sets up the testing packages to allow the tests
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import os
import subprocess
import platform
import sys
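# Typical invocation (inferred from the argparse setup below; flags and paths are examples):
#   python3 bootstrap.py my-venv --use-pip pip3
# which installs the distro packages, creates the virtualenv, and pip-installs the test stack.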
pip_packages = [
"autest==1.10.0",
"hyper",
"requests",
"dnslib",
"httpbin",
"gunicorn",
"traffic-replay" # this should install TRLib, MicroServer, MicroDNS, Traffic-Replay
]
distro_packages = {
"RHEL": [
"install epel-release",
"install python36",
"install rh-python36-python-virtualenv"
],
"Fedora": [
"install python3",
"install python3-virtualenv",
"install python-virtualenv",
],
"Ubuntu": [
"install python3",
"install python3-virtualenv",
"install virtualenv",
"install python3-dev"
],
"CentOS": [
"install epel-release",
"install rh-python36-python-virtualenv"
],
"CentOS-8": [
"install epel-release",
"install python3-virtualenv"
]
}
def command_output(cmd_str):
print(cmd_str)
proc = subprocess.Popen(
cmd_str,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
# while command runs get output
    while proc.poll() is None:
tmp = proc.stdout.readline()
sys.stdout.write(tmp)
for last_output in proc.stdout.readlines():
sys.stdout.write(last_output)
return proc.returncode
def get_distro():
return platform.linux_distribution()
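# Note: platform.linux_distribution() was deprecated in Python 3.5 and removed in 3.8,
# so the distro helpers below require an older interpreter.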
def distro_version():
return int(get_distro()[1].split(".")[0])
def isFedora():
return get_distro()[0].startswith("Fedora")
def isCentOS():
return get_distro()[0].startswith("CentOS")
def distro():
if isFedora():
return "Fedora"
if isCentOS():
return "CentOS"
if get_distro()[0].startswith("Red Hat"):
return "RHEL"
if get_distro()[0].startswith("Ubuntu"):
return "Ubuntu"
def isRedHatBased():
return get_distro()[0].startswith("Red Hat") or get_distro()[0].startswith(
"Fedora") or get_distro()[0].startswith("CentOS")
def isInstalled(prog):
    # universal_newlines=True makes stdout a str, so the '' comparison works on Python 3
    out = subprocess.Popen(
        ["which", prog], stdout=subprocess.PIPE,
        universal_newlines=True).communicate()
    if out[0] != '':
        return True
    return False
def installManagerName():
if isRedHatBased() and distro_version() >= 22:
ret = "sudo dnf -y" # Fedora 22 or newer
elif isRedHatBased():
ret = "sudo yum -y" # Red Hat distro
else:
ret = "sudo apt-get -y" # Ubuntu/Debian
return ret
def installToolName():
if isRedHatBased():
ret = "rpm -ihv" # Red Hat Based
else:
ret = "dpkg -iv" # Ubuntu/Debian
return ret
def run_cmds(cmds):
for cmd in cmds:
# print (cmd.split[" "])
# subprocess.call(cmd.split[" "])
if command_output(cmd):
print("'{0}'' - Failed".format(cmd))
def gen_package_cmds(packages):
# main install tool/manager (yum, dnf, apt-get, etc)
mtool = installManagerName()
# core install tool (rpm, dpkg, etc)
itool = installToolName()
ret = []
for p in packages:
if p.startswith("wget"):
pth = p[5:]
pack = os.path.split(pth)[1]
cmd = ["wget {0}".format(pth), "{0} ./{1}".format(itool, pack)]
else:
cmd = ["{0} {1}".format(mtool, p)]
ret.extend(cmd)
return ret
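# On RHEL and CentOS older than 8, Python 3.6 ships as the rh-python36 software collection,
# so it must be enabled in the shell before the virtualenv/pip commands below can run.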
extra = ''
if distro() == 'RHEL' or (distro() == 'CentOS' and distro_version() < 8):
extra = ". /opt/rh/rh-python36/enable ;"
def venv_cmds(path):
'''
Create virtual environment and add it
to the path being used for the script
'''
return [
# first command only needed for rhel and centos systems at this time
extra + " virtualenv --python=python3 {0}".format(path),
extra + " {0}/bin/pip install pip setuptools --upgrade".format(path)
]
def main():
" main script logic"
parser = argparse.ArgumentParser()
parser.add_argument(
"--use-pip", nargs='?', default="pip", help="Which pip to use")
parser.add_argument(
"venv_path",
nargs='?',
default="env-test",
help="The directory to us to for the virtualenv")
parser.add_argument(
"--disable-virtualenv",
default=False,
action='store_true',
help="Do not create virtual environment to install packages under")
parser.add_argument(
'-V', '--version', action='version', version='%(prog)s 1.0.0')
args = parser.parse_args()
# print(args)
# print(get_distro())
# do we know of packages to install for the given platform
dist = distro()
cmds = []
# if centos 8 we must set crypto to legacy to allow tlsv1.0 tests
if dist:
if distro() == 'CentOS' and distro_version() > 7:
cmds += ["sudo update-crypto-policies --set LEGACY"]
if dist:
if distro() == 'CentOS' and distro_version() > 7:
cmds += gen_package_cmds(distro_packages['CentOS-8'])
else:
cmds += gen_package_cmds(distro_packages[dist])
# test to see if we should use a certain version of pip
path_to_pip = None
if args.use_pip != "pip":
path_to_pip = args.use_pip
# install on the system, or use virtualenv for pip based stuff
if not args.disable_virtualenv:
# Create virtual env
cmds += venv_cmds(args.venv_path)
if path_to_pip is None:
path_to_pip = os.path.join(args.venv_path, "bin", args.use_pip)
cmds += [extra + "{0} install {1}".format(path_to_pip, " ".join(pip_packages))]
run_cmds(cmds)
if __name__ == '__main__':
main()
avg_line_length: 26.051587 | max_line_length: 88 | alphanum_fraction: 0.618126
hexsha: dab60c7749f91caec80bbc52f08b0fddeeaadb80 | size: 4,562 | ext: py | lang: Python
max_stars: pysnmp/H3C-EPON-FB-MIB.py @ agustinhenze/mibs.snmplabs.com (head 1fc5c07860542b89212f4c8ab807057d9a9206c7), licenses ["Apache-2.0"], count 11, events 2021-02-02T16:27:16.000Z to 2021-08-31T06:22:49.000Z
max_issues: same path/repo/head/licenses, count 75, events 2021-02-24T17:30:31.000Z to 2021-12-08T00:01:18.000Z
max_forks: same path/repo/head/licenses, count 10, events 2019-04-30T05:51:36.000Z to 2022-02-16T03:33:41.000Z
content:
#
# PySNMP MIB module H3C-EPON-FB-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H3C-EPON-FB-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:09:05 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
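# Note: like other pysmi-generated MIB modules, this file is loaded and executed by pysnmp's
# MibBuilder, which supplies the 'mibBuilder' symbol used throughout; it is not meant to be
# imported directly.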
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint")
h3cEpon, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "h3cEpon")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, ModuleIdentity, Gauge32, Counter32, iso, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter64, Integer32, MibIdentifier, ObjectIdentity, Unsigned32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "ModuleIdentity", "Gauge32", "Counter32", "iso", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter64", "Integer32", "MibIdentifier", "ObjectIdentity", "Unsigned32", "IpAddress")
DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention")
h3cEponFBMibObjects = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6))
if mibBuilder.loadTexts: h3cEponFBMibObjects.setLastUpdated('200711271008Z')
if mibBuilder.loadTexts: h3cEponFBMibObjects.setOrganization('Hangzhou H3C Technologies Co., Ltd.')
h3cEponFBMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6, 1))
h3cEponFBMIBTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6, 1, 1), )
if mibBuilder.loadTexts: h3cEponFBMIBTable.setStatus('current')
h3cEponFBMIBEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6, 1, 1, 1), ).setIndexNames((0, "H3C-EPON-FB-MIB", "h3cEponFBGroupIndex"))
if mibBuilder.loadTexts: h3cEponFBMIBEntry.setStatus('current')
h3cEponFBGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6, 1, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: h3cEponFBGroupIndex.setStatus('current')
h3cEponFBGroupRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6, 1, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cEponFBGroupRowStatus.setStatus('current')
h3cEponFBMasterPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6, 1, 1, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cEponFBMasterPort.setStatus('current')
h3cEponFBSlavePort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6, 1, 1, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cEponFBSlavePort.setStatus('current')
h3cEponFBMasterPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cEponFBMasterPortStatus.setStatus('current')
h3cEponFBSlavePortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ready", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cEponFBSlavePortStatus.setStatus('current')
h3cEponFBSwitchover = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 42, 6, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("false", 1), ("true", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cEponFBSwitchover.setStatus('current')
mibBuilder.exportSymbols("H3C-EPON-FB-MIB", PYSNMP_MODULE_ID=h3cEponFBMibObjects, h3cEponFBGroupIndex=h3cEponFBGroupIndex, h3cEponFBSlavePort=h3cEponFBSlavePort, h3cEponFBSlavePortStatus=h3cEponFBSlavePortStatus, h3cEponFBMIB=h3cEponFBMIB, h3cEponFBMasterPort=h3cEponFBMasterPort, h3cEponFBMIBTable=h3cEponFBMIBTable, h3cEponFBSwitchover=h3cEponFBSwitchover, h3cEponFBMIBEntry=h3cEponFBMIBEntry, h3cEponFBMibObjects=h3cEponFBMibObjects, h3cEponFBGroupRowStatus=h3cEponFBGroupRowStatus, h3cEponFBMasterPortStatus=h3cEponFBMasterPortStatus)
avg_line_length: 120.052632 | max_line_length: 538 | alphanum_fraction: 0.769838
hexsha: 870f9b029ceef184386229c3ae8362700d6d39a5 | size: 32,802 | ext: py | lang: Python
max_stars: tests/rule_based_profiler/parameter_builder/test_mean_unexpected_map_metric_multi_batch_parameter_builder.py @ chrisarnold91/great_expectations (head 9ea7be63b1219a70f49114dee5c433fd8470ffd2), licenses ["Apache-2.0"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
content:
from typing import Any, Dict, List, Optional
import numpy as np
import pytest
from great_expectations import DataContext
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.config import ParameterBuilderConfig
from great_expectations.rule_based_profiler.helpers.util import (
get_parameter_value_and_validate_return_type,
)
from great_expectations.rule_based_profiler.parameter_builder import (
MeanUnexpectedMapMetricMultiBatchParameterBuilder,
MetricMultiBatchParameterBuilder,
)
from great_expectations.rule_based_profiler.types import (
DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
Domain,
ParameterContainer,
)
from tests.rule_based_profiler.conftest import ATOL, RTOL
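# The tests below exercise MeanUnexpectedMapMetricMultiBatchParameterBuilder with its
# "my_total_count" and "my_null_count" dependencies resolved three ways: built separately
# beforehand, declared via evaluation_parameter_builder_configs so the builder evaluates
# them itself, or a mix of both; numeric and datetime columns are covered, plus checks on
# the serialized key set.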
def test_instantiation_mean_unexpected_map_metric_multi_batch_parameter_builder(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
_: MeanUnexpectedMapMetricMultiBatchParameterBuilder = (
MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_name",
map_metric_name="column_values.nonnull",
total_count_parameter_builder_name="my_total_count",
data_context=data_context,
)
)
def test_instantiation_mean_unexpected_map_metric_multi_batch_parameter_builder_required_arguments_absent(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
with pytest.raises(TypeError) as excinfo:
# noinspection PyArgumentList
_: MeanUnexpectedMapMetricMultiBatchParameterBuilder = (
MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_name",
map_metric_name="column_values.nonnull",
data_context=data_context,
)
)
assert (
"__init__() missing 1 required positional argument: 'total_count_parameter_builder_name'"
in str(excinfo.value)
)
with pytest.raises(TypeError) as excinfo:
# noinspection PyArgumentList
_: MeanUnexpectedMapMetricMultiBatchParameterBuilder = (
MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_name",
total_count_parameter_builder_name="my_total_count",
data_context=data_context,
)
)
assert (
"__init__() missing 1 required positional argument: 'map_metric_name'"
in str(excinfo.value)
)
def test_mean_unexpected_map_metric_multi_batch_parameter_builder_bobby_numeric_dependencies_evaluated_separately(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
batch_request: dict = {
"datasource_name": "taxi_pandas",
"data_connector_name": "monthly",
"data_asset_name": "my_reports",
}
my_total_count_metric_multi_batch_parameter_builder: MetricMultiBatchParameterBuilder = MetricMultiBatchParameterBuilder(
name="my_total_count",
metric_name="table.row_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
my_null_count_metric_multi_batch_parameter_builder: MetricMultiBatchParameterBuilder = MetricMultiBatchParameterBuilder(
name="my_null_count",
metric_name="column_values.nonnull.unexpected_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
mean_unexpected_map_metric_multi_batch_parameter_builder: MeanUnexpectedMapMetricMultiBatchParameterBuilder = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_passenger_count_values_not_null_mean_unexpected_map_metric",
map_metric_name="column_values.nonnull",
total_count_parameter_builder_name="my_total_count",
null_count_parameter_builder_name="my_null_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
metric_domain_kwargs: dict = {"column": "passenger_count"}
domain: Domain = Domain(
domain_type=MetricDomainTypes.COLUMN, domain_kwargs=metric_domain_kwargs
)
variables: Optional[ParameterContainer] = None
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
my_total_count_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
my_null_count_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
mean_unexpected_map_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
expected_parameter_value: float = 0.0
actual_parameter_value: Optional[
Any
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=mean_unexpected_map_metric_multi_batch_parameter_builder.fully_qualified_parameter_name,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
rtol: float = RTOL
atol: float = 5.0e-1 * ATOL
np.testing.assert_allclose(
actual=actual_parameter_value.value,
desired=expected_parameter_value,
rtol=rtol,
atol=atol,
err_msg=f"Actual value of {actual_parameter_value.value} differs from expected value of {expected_parameter_value} by more than {atol + rtol * abs(actual_parameter_value.value)} tolerance.",
)
def test_mean_unexpected_map_metric_multi_batch_parameter_builder_bobby_numeric_dependencies_evaluated_in_parameter_builder(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
batch_request: dict = {
"datasource_name": "taxi_pandas",
"data_connector_name": "monthly",
"data_asset_name": "my_reports",
}
my_total_count_metric_multi_batch_parameter_builder_config: ParameterBuilderConfig = ParameterBuilderConfig(
module_name="great_expectations.rule_based_profiler.parameter_builder",
class_name="MetricMultiBatchParameterBuilder",
name="my_total_count",
metric_name="table.row_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
)
my_null_count_metric_multi_batch_parameter_builder_config: ParameterBuilderConfig = ParameterBuilderConfig(
module_name="great_expectations.rule_based_profiler.parameter_builder",
class_name="MetricMultiBatchParameterBuilder",
name="my_null_count",
metric_name="column_values.nonnull.unexpected_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
)
evaluation_parameter_builder_configs: Optional[List[ParameterBuilderConfig]] = [
my_total_count_metric_multi_batch_parameter_builder_config,
my_null_count_metric_multi_batch_parameter_builder_config,
]
mean_unexpected_map_metric_multi_batch_parameter_builder: MeanUnexpectedMapMetricMultiBatchParameterBuilder = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_passenger_count_values_not_null_mean_unexpected_map_metric",
map_metric_name="column_values.nonnull",
total_count_parameter_builder_name="my_total_count",
null_count_parameter_builder_name="my_null_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
evaluation_parameter_builder_configs=evaluation_parameter_builder_configs,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
metric_domain_kwargs: dict = {"column": "passenger_count"}
domain: Domain = Domain(
domain_type=MetricDomainTypes.COLUMN, domain_kwargs=metric_domain_kwargs
)
variables: Optional[ParameterContainer] = None
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
mean_unexpected_map_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
expected_parameter_value: float = 0.0
actual_parameter_value: Optional[
Any
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=mean_unexpected_map_metric_multi_batch_parameter_builder.fully_qualified_parameter_name,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
rtol: float = RTOL
atol: float = 5.0e-1 * ATOL
np.testing.assert_allclose(
actual=actual_parameter_value.value,
desired=expected_parameter_value,
rtol=rtol,
atol=atol,
err_msg=f"Actual value of {actual_parameter_value.value} differs from expected value of {expected_parameter_value} by more than {atol + rtol * abs(actual_parameter_value.value)} tolerance.",
)
def test_mean_unexpected_map_metric_multi_batch_parameter_builder_bobby_numeric_dependencies_evaluated_mixed(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
batch_request: dict = {
"datasource_name": "taxi_pandas",
"data_connector_name": "monthly",
"data_asset_name": "my_reports",
}
my_total_count_metric_multi_batch_parameter_builder_config: ParameterBuilderConfig = ParameterBuilderConfig(
module_name="great_expectations.rule_based_profiler.parameter_builder",
class_name="MetricMultiBatchParameterBuilder",
name="my_total_count",
metric_name="table.row_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
)
my_null_count_metric_multi_batch_parameter_builder: MetricMultiBatchParameterBuilder = MetricMultiBatchParameterBuilder(
name="my_null_count",
metric_name="column_values.nonnull.unexpected_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
evaluation_parameter_builder_configs: Optional[List[ParameterBuilderConfig]] = [
my_total_count_metric_multi_batch_parameter_builder_config,
]
mean_unexpected_map_metric_multi_batch_parameter_builder: MeanUnexpectedMapMetricMultiBatchParameterBuilder = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_passenger_count_values_not_null_mean_unexpected_map_metric",
map_metric_name="column_values.nonnull",
total_count_parameter_builder_name="my_total_count",
null_count_parameter_builder_name="my_null_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
evaluation_parameter_builder_configs=evaluation_parameter_builder_configs,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
metric_domain_kwargs: dict = {"column": "passenger_count"}
domain: Domain = Domain(
domain_type=MetricDomainTypes.COLUMN, domain_kwargs=metric_domain_kwargs
)
variables: Optional[ParameterContainer] = None
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
my_null_count_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
mean_unexpected_map_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
expected_parameter_value: float = 0.0
actual_parameter_value: Optional[
Any
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=mean_unexpected_map_metric_multi_batch_parameter_builder.fully_qualified_parameter_name,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
rtol: float = RTOL
atol: float = 5.0e-1 * ATOL
np.testing.assert_allclose(
actual=actual_parameter_value.value,
desired=expected_parameter_value,
rtol=rtol,
atol=atol,
err_msg=f"Actual value of {actual_parameter_value.value} differs from expected value of {expected_parameter_value} by more than {atol + rtol * abs(actual_parameter_value.value)} tolerance.",
)
def test_mean_unexpected_map_metric_multi_batch_parameter_builder_bobby_datetime_dependencies_evaluated_separately(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
batch_request: dict = {
"datasource_name": "taxi_pandas",
"data_connector_name": "monthly",
"data_asset_name": "my_reports",
}
my_total_count_metric_multi_batch_parameter_builder: MetricMultiBatchParameterBuilder = MetricMultiBatchParameterBuilder(
name="my_total_count",
metric_name="table.row_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
my_null_count_metric_multi_batch_parameter_builder: MetricMultiBatchParameterBuilder = MetricMultiBatchParameterBuilder(
name="my_null_count",
metric_name="column_values.nonnull.unexpected_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
mean_unexpected_map_metric_multi_batch_parameter_builder: MeanUnexpectedMapMetricMultiBatchParameterBuilder = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_pickup_datetime_count_values_unique_mean_unexpected_map_metric",
map_metric_name="column_values.nonnull",
total_count_parameter_builder_name="my_total_count",
null_count_parameter_builder_name="my_null_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
metric_domain_kwargs: dict = {"column": "pickup_datetime"}
domain: Domain = Domain(
domain_type=MetricDomainTypes.COLUMN, domain_kwargs=metric_domain_kwargs
)
variables: Optional[ParameterContainer] = None
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
my_total_count_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
my_null_count_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
mean_unexpected_map_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
expected_parameter_value: float = 3.89e-3
actual_parameter_value: Optional[
Any
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=mean_unexpected_map_metric_multi_batch_parameter_builder.fully_qualified_parameter_name,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
rtol: float = RTOL
atol: float = 5.0e-1 * ATOL
np.testing.assert_allclose(
actual=actual_parameter_value.value,
desired=expected_parameter_value,
rtol=rtol,
atol=atol,
err_msg=f"Actual value of {actual_parameter_value.value} differs from expected value of {expected_parameter_value} by more than {atol + rtol * abs(actual_parameter_value.value)} tolerance.",
)
def test_mean_unexpected_map_metric_multi_batch_parameter_builder_bobby_datetime_dependencies_evaluated_in_parameter_builder(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
batch_request: dict = {
"datasource_name": "taxi_pandas",
"data_connector_name": "monthly",
"data_asset_name": "my_reports",
}
my_total_count_metric_multi_batch_parameter_builder_config: ParameterBuilderConfig = ParameterBuilderConfig(
module_name="great_expectations.rule_based_profiler.parameter_builder",
class_name="MetricMultiBatchParameterBuilder",
name="my_total_count",
metric_name="table.row_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
)
my_null_count_metric_multi_batch_parameter_builder_config: ParameterBuilderConfig = ParameterBuilderConfig(
module_name="great_expectations.rule_based_profiler.parameter_builder",
class_name="MetricMultiBatchParameterBuilder",
name="my_null_count",
metric_name="column_values.nonnull.unexpected_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
)
evaluation_parameter_builder_configs: Optional[List[ParameterBuilderConfig]] = [
my_total_count_metric_multi_batch_parameter_builder_config,
my_null_count_metric_multi_batch_parameter_builder_config,
]
mean_unexpected_map_metric_multi_batch_parameter_builder: MeanUnexpectedMapMetricMultiBatchParameterBuilder = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_pickup_datetime_count_values_unique_mean_unexpected_map_metric",
map_metric_name="column_values.nonnull",
total_count_parameter_builder_name="my_total_count",
null_count_parameter_builder_name="my_null_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
evaluation_parameter_builder_configs=evaluation_parameter_builder_configs,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
metric_domain_kwargs: dict = {"column": "pickup_datetime"}
domain: Domain = Domain(
domain_type=MetricDomainTypes.COLUMN, domain_kwargs=metric_domain_kwargs
)
variables: Optional[ParameterContainer] = None
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
mean_unexpected_map_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
expected_parameter_value: float = 3.89e-3
actual_parameter_value: Optional[
Any
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=mean_unexpected_map_metric_multi_batch_parameter_builder.fully_qualified_parameter_name,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
rtol: float = RTOL
atol: float = 5.0e-1 * ATOL
np.testing.assert_allclose(
actual=actual_parameter_value.value,
desired=expected_parameter_value,
rtol=rtol,
atol=atol,
err_msg=f"Actual value of {actual_parameter_value.value} differs from expected value of {expected_parameter_value} by more than {atol + rtol * abs(actual_parameter_value.value)} tolerance.",
)
def test_mean_unexpected_map_metric_multi_batch_parameter_builder_bobby_datetime_dependencies_evaluated_mixed(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
batch_request: dict = {
"datasource_name": "taxi_pandas",
"data_connector_name": "monthly",
"data_asset_name": "my_reports",
}
my_total_count_metric_multi_batch_parameter_builder: MetricMultiBatchParameterBuilder = MetricMultiBatchParameterBuilder(
name="my_total_count",
metric_name="table.row_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
my_null_count_metric_multi_batch_parameter_builder_config: ParameterBuilderConfig = ParameterBuilderConfig(
module_name="great_expectations.rule_based_profiler.parameter_builder",
class_name="MetricMultiBatchParameterBuilder",
name="my_null_count",
metric_name="column_values.nonnull.unexpected_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
)
evaluation_parameter_builder_configs: Optional[List[ParameterBuilderConfig]] = [
my_null_count_metric_multi_batch_parameter_builder_config,
]
mean_unexpected_map_metric_multi_batch_parameter_builder: MeanUnexpectedMapMetricMultiBatchParameterBuilder = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_pickup_datetime_count_values_unique_mean_unexpected_map_metric",
map_metric_name="column_values.nonnull",
total_count_parameter_builder_name="my_total_count",
null_count_parameter_builder_name="my_null_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
evaluation_parameter_builder_configs=evaluation_parameter_builder_configs,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
metric_domain_kwargs: dict = {"column": "pickup_datetime"}
domain: Domain = Domain(
domain_type=MetricDomainTypes.COLUMN, domain_kwargs=metric_domain_kwargs
)
variables: Optional[ParameterContainer] = None
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
my_total_count_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
mean_unexpected_map_metric_multi_batch_parameter_builder.build_parameters(
domain=domain,
variables=variables,
parameters=parameters,
)
expected_parameter_value: float = 3.89e-3
actual_parameter_value: Optional[
Any
] = get_parameter_value_and_validate_return_type(
domain=domain,
parameter_reference=mean_unexpected_map_metric_multi_batch_parameter_builder.fully_qualified_parameter_name,
expected_return_type=None,
variables=variables,
parameters=parameters,
)
rtol: float = RTOL
atol: float = 5.0e-1 * ATOL
np.testing.assert_allclose(
actual=actual_parameter_value.value,
desired=expected_parameter_value,
rtol=rtol,
atol=atol,
err_msg=f"Actual value of {actual_parameter_value.value} differs from expected value of {expected_parameter_value} by more than {atol + rtol * abs(actual_parameter_value.value)} tolerance.",
)
def test_mean_unexpected_map_metric_multi_batch_parameter_builder_bobby_check_serialized_keys_no_evaluation_parameter_builder_configs(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
batch_request: dict = {
"datasource_name": "taxi_pandas",
"data_connector_name": "monthly",
"data_asset_name": "my_reports",
}
mean_unexpected_map_metric_multi_batch_parameter_builder: MeanUnexpectedMapMetricMultiBatchParameterBuilder = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_pickup_datetime_count_values_unique_mean_unexpected_map_metric",
map_metric_name="column_values.nonnull",
total_count_parameter_builder_name="my_total_count",
null_count_parameter_builder_name="my_null_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
# Note: "evaluation_parameter_builder_configs" is not one of "ParameterBuilder" formal property attributes.
assert set(
mean_unexpected_map_metric_multi_batch_parameter_builder.to_json_dict().keys()
) == {
"class_name",
"module_name",
"name",
"map_metric_name",
"total_count_parameter_builder_name",
"null_count_parameter_builder_name",
"metric_domain_kwargs",
"metric_value_kwargs",
"json_serialize",
"batch_request",
}
def test_mean_unexpected_map_metric_multi_batch_parameter_builder_bobby_check_serialized_keys_with_evaluation_parameter_builder_configs(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
batch_request: dict = {
"datasource_name": "taxi_pandas",
"data_connector_name": "monthly",
"data_asset_name": "my_reports",
}
my_total_count_metric_multi_batch_parameter_builder_config: ParameterBuilderConfig = ParameterBuilderConfig(
module_name="great_expectations.rule_based_profiler.parameter_builder",
class_name="MetricMultiBatchParameterBuilder",
name="my_total_count",
metric_name="table.row_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
)
my_null_count_metric_multi_batch_parameter_builder_config: ParameterBuilderConfig = ParameterBuilderConfig(
module_name="great_expectations.rule_based_profiler.parameter_builder",
class_name="MetricMultiBatchParameterBuilder",
name="my_null_count",
metric_name="column_values.nonnull.unexpected_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
enforce_numeric_metric=False,
replace_nan_with_zero=False,
reduce_scalar_metric=True,
evaluation_parameter_builder_configs=None,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
)
evaluation_parameter_builder_configs: Optional[List[ParameterBuilderConfig]] = [
my_total_count_metric_multi_batch_parameter_builder_config,
my_null_count_metric_multi_batch_parameter_builder_config,
]
mean_unexpected_map_metric_multi_batch_parameter_builder: MeanUnexpectedMapMetricMultiBatchParameterBuilder = MeanUnexpectedMapMetricMultiBatchParameterBuilder(
name="my_pickup_datetime_count_values_unique_mean_unexpected_map_metric",
map_metric_name="column_values.nonnull",
total_count_parameter_builder_name="my_total_count",
null_count_parameter_builder_name="my_null_count",
metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
metric_value_kwargs=None,
evaluation_parameter_builder_configs=evaluation_parameter_builder_configs,
json_serialize=False,
batch_list=None,
batch_request=batch_request,
data_context=data_context,
)
# Note: "evaluation_parameter_builder_configs" is not one of "ParameterBuilder" formal property attributes.
assert set(
mean_unexpected_map_metric_multi_batch_parameter_builder.to_json_dict().keys()
) == {
"class_name",
"module_name",
"name",
"map_metric_name",
"total_count_parameter_builder_name",
"null_count_parameter_builder_name",
"metric_domain_kwargs",
"metric_value_kwargs",
"json_serialize",
"batch_request",
}
avg_line_length: 39.236842 | max_line_length: 198 | alphanum_fraction: 0.750869
hexsha: 52e3fb8fd7b2003933db1324a4cfb056ce2ca853 | size: 434 | ext: py | lang: Python
max_stars: diffpads_plugin.py @ Tuetuopay/diffpads (head 91706199177ccaf3a19a832a524ba1ffce9761c5), licenses ["Beerware"], count 1, events 2021-10-15T05:41:26.000Z to 2021-10-15T05:41:26.000Z
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
content:
#!/usr/bin/env python
# Diff Pads for pcbnew using bezier curves as an exit
from pcbnew import ActionPlugin, GetBoard
from .diffpads_dialog import init_diffpads_dialog
class DiffPadsPlugin(ActionPlugin):
def defaults(self):
self.name = "DiffPads"
self.category = "Modify PCB"
self.description = "Creates good-looking differential pads exits"
def Run(self):
init_diffpads_dialog(GetBoard())
avg_line_length: 27.125 | max_line_length: 73 | alphanum_fraction: 0.721198
hexsha: 394ae8303457f915ea5e532379c76445f40d1961 | size: 1,164 | ext: py | lang: Python
max_stars: setup.py @ qwertypool/django-simple-menu (head 7a1eff63e6d2d15b4265207cfa5cf79dedf4b9a4), licenses ["BSD-2-Clause"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
content:
#!/usr/bin/env python
import re
from setuptools import setup
# load our version from our init file
init_data = open('menu/__init__.py').read()
matches = re.search(r"__version__ = '([^']+)'", init_data, re.M)
if matches:
version = matches.group(1)
else:
raise RuntimeError("Unable to load version")
requirements = [
'setuptools',
'Django'
]
setup(name='django-simple-menu',
packages=['menu'],
include_package_data=True,
version=version,
description='Simple, yet powerful, code-based menus for Django applications',
long_description=open('README.rst').read(),
author='Evan Borgstrom',
author_email='evan@borgstrom.ca',
url='https://github.com/jazzband/django-simple-menu',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=requirements)
avg_line_length: 29.846154 | max_line_length: 83 | alphanum_fraction: 0.640034
hexsha: 958c0eda37fa32fda2940b5af7198bb60d3609f7 | size: 558 | ext: py | lang: Python
max_stars: packages/python/plotly/plotly/validators/heatmapgl/colorbar/_tickmode.py @ mastermind88/plotly.py (head efa70710df1af22958e1be080e105130042f1839), licenses ["MIT"], count null, events null
max_issues: same path/repo/head/licenses, count null, events null
max_forks: same path/repo/head/licenses, count null, events null
content:
import _plotly_utils.basevalidators
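# Auto-generated enumerated validator: restricts heatmapgl.colorbar.tickmode to
# "auto", "linear", or "array".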
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="tickmode", parent_name="heatmapgl.colorbar", **kwargs
):
super(TickmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
values=kwargs.pop("values", ["auto", "linear", "array"]),
**kwargs,
)
avg_line_length: 34.875 | max_line_length: 80 | alphanum_fraction: 0.636201
hexsha: d839c481189cb3fa9644f6624d0a0ac2ab7924ed | size: 6,646 | ext: py | lang: Python
max_stars: configs/sku110k_cascade_rcnn_x101_32x4d_fpn_anchor_1x_fold3_2s.py @ tyomj/product_detection (head 380cfdd7eb581971649857132080d19e7a7f698d), licenses ["Apache-2.0"], count 49, events 2020-06-16T02:23:21.000Z to 2022-03-28T03:52:53.000Z
max_issues: same path/repo/head/licenses, count 5, events 2020-06-26T08:43:26.000Z to 2021-09-17T13:34:53.000Z
max_forks: same path/repo/head/licenses, count 7, events 2020-06-23T06:30:25.000Z to 2021-12-15T02:02:50.000Z
content:
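# Two-stage Cascade R-CNN with a ResNeXt-101 32x4d backbone and FPN for the SKU-110K
# retail dataset (fold 3), written in mmdetection 1.x config style (top-level
# train_cfg/test_cfg, imgs_per_gpu).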
# model settings
model = dict(
type='CascadeRCNN',
num_stages=2,
#pretrained='torchvision://resnet50',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[4],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=2,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=2,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
])
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=500))
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/home/artem-nb/Datasets/SKU110K_fixed/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(768, 1024), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(768, 1024),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=3,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/train_3.json',
img_prefix=data_root + 'images/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/val_3.json',
img_prefix=data_root + 'images/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/val_3.json',
img_prefix=data_root + 'images/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = '/home/artem-nb/Projects/SKU110k/work_dirs/cascade_rcnn_x101_32x4d_fpn_anchor_1x_fold3_2s'
load_from = '/home/artem-nb/Projects/mmdetection/models/cascade_rcnn_x101_32x4d_fpn_1x_20190501-af628be5.pth'
resume_from = None
workflow = [('train', 1)]
avg_line_length: 31.647619 | max_line_length: 109 | alphanum_fraction: 0.566205
hexsha: 76a5bf9c2fa3f2d04ef87ad980f7df644efd2946 | size: 7,419 | ext: py | lang: Python
max_stars: Models/base_model.py @ nguyenvo09/EACL2021 (head 9d04d8954c1ded2110daac23117de11221f08cc6), licenses ["MIT"], count 27, events 2021-01-18T16:03:17.000Z to 2022-03-05T22:38:34.000Z
max_issues: Models/base_model.py @ Jason98Xu/GET (head 6860c87425619954cacbf5a14ad20befd18ec818), licenses ["MIT"], count null, events null
max_forks: Models/base_model.py @ Jason98Xu/GET (head 6860c87425619954cacbf5a14ad20befd18ec818), licenses ["MIT"], count 2, events 2022-03-16T03:22:16.000Z to 2022-03-27T03:12:14.000Z
content:
"""
Base model for all matching model
"""
from torch import nn
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine import hyper_spaces
import typing
from matchzoo.utils import parse
import numpy as np
import torch
import torch_utils as my_utils
class BaseModel(nn.Module):
def __init__(self):
super().__init__()
pass
def get_default_params(self,
with_embedding = False,
with_multi_layer_perceptron = False):
"""
Model default parameters.
The common usage is to instantiate :class:`matchzoo.engine.ModelParams`
first, then set the model specific parametrs.
Examples:
>>> class MyModel(BaseModel):
... def build(self):
... print(self._params['num_eggs'], 'eggs')
... print('and', self._params['ham_type'])
...
...
... def get_default_params(self):
... params = ParamTable()
... params.add(Param('num_eggs', 512))
... params.add(Param('ham_type', 'Parma Ham'))
... return params
>>> my_model = MyModel()
>>> my_model.build()
512 eggs
and Parma Ham
Notice that all parameters must be serialisable for the entire model
to be serialisable. Therefore, it's strongly recommended to use python
native data types to store parameters.
:return: model parameters
"""
params = ParamTable()
params.add(Param(
name = 'model_class', value = self.__class__.__name__,
desc = "Model class. Used internally for save/load. "
"Changing this may cause unexpected behaviors."
))
params.add(Param(
name = 'input_shapes',
desc = "Dependent on the model and data. Should be set manually."
))
params.add(Param(
name = 'task',
desc = "Decides model output shape, loss, and metrics."
))
params.add(Param(
name = 'optimizer', value = 'adam',
))
if with_embedding:
params.add(Param(
name = 'with_embedding', value = True,
desc = "A flag used help `auto` module. Shouldn't be changed."
))
params.add(Param(
name = 'embedding_input_dim',
desc = 'Usually equals vocab size + 1. Should be set manually.'
))
params.add(Param(
name = 'embedding_output_dim',
desc = 'Should be set manually.'
))
params.add(Param(
name = 'embedding_trainable', value = True,
desc = '`True` to enable embedding layer training, '
'`False` to freeze embedding parameters.'
))
if with_multi_layer_perceptron:
params.add(Param(
name = 'with_multi_layer_perceptron', value = True,
desc = "A flag of whether a multiple layer perceptron is used. "
"Shouldn't be changed."
))
params.add(Param(
name = 'mlp_num_units', value = 128,
desc = "Number of units in first `mlp_num_layers` layers.",
hyper_space = hyper_spaces.quniform(8, 256, 8)
))
params.add(Param(
name = 'mlp_num_layers', value = 3,
desc = "Number of layers of the multiple layer percetron.",
hyper_space = hyper_spaces.quniform(1, 6)
))
params.add(Param(
name = 'mlp_num_fan_out', value = 64,
desc = "Number of units of the layer that connects the multiple "
"layer percetron and the output.",
hyper_space = hyper_spaces.quniform(4, 128, 4)
))
params.add(Param(
name = 'mlp_activation_func', value = 'relu',
desc = 'Activation function used in the multiple '
'layer perceptron.'
))
return params
def _make_perceptron_layer(
self,
in_features: int = 0,
out_features: int = 0,
        activation: nn.Module = nn.ReLU()
) -> nn.Module:
""":return: a perceptron layer."""
return nn.Sequential(
nn.Linear(in_features, out_features),
activation
)
def _make_output_layer(
self,
in_features: int = 0,
activation: typing.Union[str, nn.Module] = None
) -> nn.Module:
""":return: a correctly shaped torch module for model output."""
if activation:
return nn.Sequential(
nn.Linear(in_features, 1),
parse.parse_activation(activation)
)
else:
return nn.Linear(in_features, 1)
def _make_default_embedding_layer(
self, _params) -> nn.Module:
""":return: an embedding module."""
if isinstance(_params['embedding'], np.ndarray):
_params['embedding_input_dim'] = (
_params['embedding'].shape[0]
)
_params['embedding_output_dim'] = (
_params['embedding'].shape[1]
)
return nn.Embedding.from_pretrained(
embeddings=torch.Tensor(_params['embedding']),
freeze=_params['embedding_freeze']
)
else:
return nn.Embedding(
num_embeddings=_params['embedding_input_dim'],
embedding_dim=_params['embedding_output_dim']
)
def _make_default_char_embedding_layer(
self, _params) -> nn.Module:
""":return: an embedding module."""
if isinstance(_params['char_embedding'], np.ndarray):
_params['char_embedding_input_dim'] = (
_params['char_embedding'].shape[0]
)
_params['char_embedding_output_dim'] = (
_params['char_embedding'].shape[1]
)
return nn.Embedding.from_pretrained(
embeddings=torch.Tensor(_params['char_embedding']),
freeze=_params['char_embedding_freeze']
)
else:
return nn.Embedding(
num_embeddings=_params['char_embedding_input_dim'],
embedding_dim=_params['char_embedding_output_dim']
)
def _make_entity_embedding_layer(
self, matrix: np.ndarray, freeze: bool) -> nn.Module:
""":return: an embedding module."""
return nn.Embedding.from_pretrained(
embeddings = torch.Tensor(matrix), freeze = freeze)
def predict(self, query: np.ndarray, doc: np.ndarray, verbose: bool = False, **kargs) -> np.ndarray:
self.train(False) # very important, to disable dropout
if verbose:
print("query: ", query)
print("doc: ", doc)
print("================ end of query doc =================")
out = self(query, doc, verbose, **kargs)
return my_utils.cpu(out).detach().numpy().flatten()
def forward(self, *input):
pass
if __name__ == '__main__':
print("here")
| 36.367647
| 104
| 0.536056
|
7cb8b3ff68c149bf06c603d044e28bd4d09d6030
| 6,293
|
py
|
Python
|
cnn_architectures/mixup/resnet_cifar/train_cifar10_leilaclip_mixup_largesample_baseline_5625.py
|
leilayasmeen/MSc_Thesis
|
ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d
|
[
"MIT"
] | 2
|
2019-10-29T03:26:20.000Z
|
2021-03-07T10:02:39.000Z
|
cnn_architectures/mixup/resnet_cifar/train_cifar10_leilaclip_mixup_largesample_baseline_5625.py
|
leilayasmeen/MSc_Thesis
|
ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d
|
[
"MIT"
] | null | null | null |
cnn_architectures/mixup/resnet_cifar/train_cifar10_leilaclip_mixup_largesample_baseline_5625.py
|
leilayasmeen/MSc_Thesis
|
ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d
|
[
"MIT"
] | null | null | null |
# Training procedure for CIFAR-10 using ResNet 110.
# ResNet model from https://github.com/BIGBALLON/cifar-10-cnn/blob/master/4_Residual_Network/ResNet_keras.py
import keras
import numpy as np
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers import Conv2D, Dense, Input, add, Activation, GlobalAveragePooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint
from keras.models import Model
from keras import optimizers, regularizers
from sklearn.model_selection import train_test_split
import pickle
# Constants
stack_n = 18
num_classes = 10
img_rows, img_cols = 32, 32
img_channels = 3
batch_size = 128
epochs = 200
iterations = 50625 // batch_size #LEILAEDIT
weight_decay = 0.0001
seed = 333
def scheduler(epoch):
if epoch < 80:
return 0.1
if epoch < 150:
return 0.01
return 0.001
def residual_network(img_input,classes_num=10,stack_n=5):
def residual_block(intput,out_channel,increase=False):
if increase:
stride = (2,2)
else:
stride = (1,1)
pre_bn = BatchNormalization()(intput)
pre_relu = Activation('relu')(pre_bn)
conv_1 = Conv2D(out_channel,kernel_size=(3,3),strides=stride,padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(pre_relu)
bn_1 = BatchNormalization()(conv_1)
relu1 = Activation('relu')(bn_1)
conv_2 = Conv2D(out_channel,kernel_size=(3,3),strides=(1,1),padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(relu1)
if increase:
projection = Conv2D(out_channel,
kernel_size=(1,1),
strides=(2,2),
padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(intput)
block = add([conv_2, projection])
else:
block = add([intput,conv_2])
return block
# build model
# total layers = stack_n * 3 * 2 + 2
# stack_n = 5 by default, total layers = 32
# input: 32x32x3 output: 32x32x16
x = Conv2D(filters=16,kernel_size=(3,3),strides=(1,1),padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(img_input)
# input: 32x32x16 output: 32x32x16
for _ in range(stack_n):
x = residual_block(x,16,False)
# input: 32x32x16 output: 16x16x32
x = residual_block(x,32,True)
for _ in range(1,stack_n):
x = residual_block(x,32,False)
# input: 16x16x32 output: 8x8x64
x = residual_block(x,64,True)
for _ in range(1,stack_n):
x = residual_block(x,64,False)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
# input: 64 output: 10
x = Dense(classes_num,activation='softmax',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(x)
return x
if __name__ == '__main__':
# load data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train45, x_val, y_train45, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=seed) # random_state = seed
# load augmentation sets
x_train45_additions = np.load('Augmentation_Sets/x_augmentation_array_mixup_baseline.npy')
x_train45_additions = x_train45_additions.transpose(0,2,3,1)
y_train45_additions = np.load('Augmentation_Sets/y_augmentation_array_mixup_baseline.npy')
y_train45_additions = y_train45_additions.reshape(-1,num_classes)
# Adjust size of augmentation array
x_train45_additions = x_train45_additions[0:5625,:]
y_train45_additions = y_train45_additions[0:5625,:]
# concatenate with initial training set
x_train45 = np.concatenate((x_train45, x_train45_additions),axis=0)
y_train45 = np.concatenate((y_train45, y_train45_additions), axis=0)
    # Standardize data with per-pixel mean and std
img_mean = x_train45.mean(axis=0) # per-pixel mean
img_std = x_train45.std(axis=0)
x_train45 = (x_train45-img_mean)/img_std
x_val = (x_val-img_mean)/img_std
x_test = (x_test-img_mean)/img_std
# build network
img_input = Input(shape=(img_rows,img_cols,img_channels))
output = residual_network(img_input,num_classes,stack_n)
resnet = Model(img_input, output)
print(resnet.summary())
# set optimizer
sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True, clipnorm=1.)
resnet.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# set callback
cbks = [LearningRateScheduler(scheduler)]
# set data augmentation
print('Using real-time data augmentation.')
datagen = ImageDataGenerator(horizontal_flip=True,
width_shift_range=0.125,
height_shift_range=0.125,
fill_mode='constant',cval=0.)
datagen.fit(x_train45)
# start training
hist = resnet.fit_generator(datagen.flow(x_train45, y_train45,batch_size=batch_size),
steps_per_epoch=iterations,
epochs=epochs,
callbacks=cbks,
validation_data=(x_val, y_val))
resnet.save('resnet_110_45kclip_mixup_largesample_baseline_5625.h5')
print("Get test accuracy:")
loss, accuracy = resnet.evaluate(x_test, y_test, verbose=0)
print("Test: accuracy1 = %f ; loss1 = %f" % (accuracy, loss))
print("Pickle models history")
with open('hist_110_cifar10_v2_45kclip_mixup_largesample_baseline_5625.p', 'wb') as f:
pickle.dump(hist.history, f)
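    # --- Illustrative follow-up (added, not part of the original script) ---
    # A minimal sketch for reloading the pickled history later; the metric key follows
    # older Keras naming ('val_acc') and may be 'val_accuracy' in newer versions.
    #
    #   with open('hist_110_cifar10_v2_45kclip_mixup_largesample_baseline_5625.p', 'rb') as f:
    #       history = pickle.load(f)
    #   print(max(history['val_acc']))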
| 38.139394
| 132
| 0.646115
|
a39fa0eda38817c03cb5f5faa5ae3af5490e5fe0
| 3,082
|
py
|
Python
|
datasets/sampler.py
|
ztmo520/NeuralRecon
|
576c2573fc5028c5a6116d344a0774cb95737621
|
[
"Apache-2.0"
] | 1,001
|
2021-04-02T01:28:39.000Z
|
2022-03-31T21:43:39.000Z
|
datasets/sampler.py
|
ztmo520/NeuralRecon
|
576c2573fc5028c5a6116d344a0774cb95737621
|
[
"Apache-2.0"
] | 79
|
2021-05-07T17:37:46.000Z
|
2022-03-30T10:37:27.000Z
|
datasets/sampler.py
|
ztmo520/NeuralRecon
|
576c2573fc5028c5a6116d344a0774cb95737621
|
[
"Apache-2.0"
] | 160
|
2021-04-02T12:20:18.000Z
|
2022-03-31T17:15:48.000Z
|
import math
import torch
import torch.distributed as dist
from torch.utils.data.distributed import Sampler
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
shuffle (optional): If true (default), sampler will shuffle the indices
.. warning::
In distributed mode, calling the ``set_epoch`` method is needed to
make shuffling work; each process will use the same random seed
otherwise.
Example::
>>> sampler = DistributedSampler(dataset) if is_distributed else None
>>> loader = DataLoader(dataset, shuffle=(sampler is None),
... sampler=sampler)
>>> for epoch in range(start_epoch, n_epochs):
        ...     if is_distributed:
        ...         sampler.set_epoch(epoch)
        ...     train(loader)
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
# indices = indices[self.rank:self.total_size:self.num_replicas]
subsample_num = self.total_size // self.num_replicas
subsample_begin = subsample_num * self.rank
indices = indices[subsample_begin:subsample_begin + subsample_num]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
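if __name__ == "__main__":
    # Illustrative usage sketch (added, not part of the original file): the sampler can
    # be exercised stand-alone by passing num_replicas/rank explicitly, so no process
    # group is required. Calling set_epoch() each epoch reseeds the shuffle.
    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(torch.arange(100).float())
    sampler = DistributedSampler(dataset, num_replicas=2, rank=0)
    loader = DataLoader(dataset, batch_size=8, sampler=sampler)
    for epoch in range(2):
        sampler.set_epoch(epoch)
        first_batch = next(iter(loader))
        print(epoch, first_batch[0][:4])  # first few shuffled samples for this replica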
| 37.13253
| 86
| 0.652174
|
1b673900d60ea7e274b3c3a9b652e7ba2fe751b6
| 1,045
|
py
|
Python
|
GestureAgents/System.py
|
chaosct/GestureAgents
|
9ec0adb1e59bf995d5808431edd4cb8bf8907728
|
[
"MIT"
] | 1
|
2015-01-22T10:42:09.000Z
|
2015-01-22T10:42:09.000Z
|
GestureAgents/System.py
|
chaosct/GestureAgents
|
9ec0adb1e59bf995d5808431edd4cb8bf8907728
|
[
"MIT"
] | null | null | null |
GestureAgents/System.py
|
chaosct/GestureAgents
|
9ec0adb1e59bf995d5808431edd4cb8bf8907728
|
[
"MIT"
] | null | null | null |
from GestureAgents.Events import Event
import GestureAgents.Reactor as Reactor
class System(object):
"""
Reference implementation of a system holding a recognizer tree
"""
def __init__(self, sources=None):
self.new_agents = {}
self.recognizers = []
self.sources = sources or []
self.running = False
def newAgent(self, recognizer):
if recognizer not in self.new_agents:
if recognizer in self.sources:
self.new_agents[recognizer] = recognizer.newAgent
else:
self.new_agents[recognizer] = Event()
self.recognizers.append(recognizer(self))
return self.new_agents[recognizer]
def run_apps(self):
"Minimal reactor loop for the event system to work"
self.running = True
while self.running:
self.update()
Reactor.run_all_now()
def stop(self):
self.running=False
def update(self):
'just override this method'
pass
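# --- Illustrative sketch (added, not part of the original file) ---
# A concrete system is expected to subclass System and override update() to poll its
# input hardware; run_apps() then drives the Reactor loop until stop() is called.
# The subclass name and body below are hypothetical.
class PollingSystem(System):
    def update(self):
        pass  # read input events here and feed the registered recognizers

# PollingSystem().run_apps()  # would block until stop() is called, so left commented out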
| 26.794872
| 66
| 0.605742
|
2bf085515b46f7d3ae630838ce60d27825ec8b82
| 254
|
py
|
Python
|
backend/note_api/urls.py
|
gorkemarslan/note-me
|
6f622237184c513ecd6dd34d5b5912d6def5e238
|
[
"MIT"
] | null | null | null |
backend/note_api/urls.py
|
gorkemarslan/note-me
|
6f622237184c513ecd6dd34d5b5912d6def5e238
|
[
"MIT"
] | null | null | null |
backend/note_api/urls.py
|
gorkemarslan/note-me
|
6f622237184c513ecd6dd34d5b5912d6def5e238
|
[
"MIT"
] | null | null | null |
from django.urls import path
from note_api import views
urlpatterns = [
path('', views.get_routes, name='routes'),
path('notes/', views.get_note_list, name='note_list'),
path('notes/<int:pk>/', views.get_note_detail, name='note_detail'),
]
| 25.4
| 71
| 0.692913
|
78568d7abdc5bfdb8450276e6a86a51c0d6ab556
| 465
|
py
|
Python
|
apps/stock/migrations/0006_auto_20210124_1236.py
|
kwanj-k/ctrim_api
|
e3ed4afcbcc138400f219f3637b51514e2696e5c
|
[
"MIT"
] | 1
|
2018-03-11T06:08:13.000Z
|
2018-03-11T06:08:13.000Z
|
apps/stock/migrations/0006_auto_20210124_1236.py
|
kwanj-k/ctrim_api
|
e3ed4afcbcc138400f219f3637b51514e2696e5c
|
[
"MIT"
] | 4
|
2019-07-22T14:19:35.000Z
|
2022-02-10T09:13:08.000Z
|
apps/stock/migrations/0006_auto_20210124_1236.py
|
kwanj-k/ctrim_api
|
e3ed4afcbcc138400f219f3637b51514e2696e5c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.9 on 2021-01-24 12:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stock', '0005_auto_20210124_1040'),
]
operations = [
migrations.AlterModelOptions(
name='stockproduct',
options={},
),
migrations.AlterUniqueTogether(
name='stockproduct',
unique_together={('stock', 'product')},
),
]
| 21.136364
| 51
| 0.574194
|
14f5725b86c7dc73f566ba23c8b1cfdc5060ef12
| 15,043
|
py
|
Python
|
test/functional/wallet_bumpfee.py
|
team247/woodcore
|
da770f4e5427ec73b1ad43846e889fc4ddb7e6c6
|
[
"MIT"
] | 1
|
2021-06-15T08:48:42.000Z
|
2021-06-15T08:48:42.000Z
|
test/functional/wallet_bumpfee.py
|
team247/woodcore
|
da770f4e5427ec73b1ad43846e889fc4ddb7e6c6
|
[
"MIT"
] | null | null | null |
test/functional/wallet_bumpfee.py
|
team247/woodcore
|
da770f4e5427ec73b1ad43846e889fc4ddb7e6c6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from decimal import Decimal
import io
from test_framework.blocktools import add_witness_commitment, create_block, create_coinbase, send_to_witness
from test_framework.messages import BIP125_SEQUENCE_NUMBER, CTransaction
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes, sync_mempools
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[
"-walletrbf={}".format(i),
"-mintxfee=0.00002",
"-mempoolreplacement=1",
] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
if True:
raise SkipTest("woodcoin doesn't support RBF.")
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
# fund rbf node with 10 coins of 0.001 btc (100,000 satoshis)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
test_maxtxfee_fails(self, rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress(address_type='p2sh-segwit'))
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransactionwithwallet(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
# the bumped tx sets fee=49,900, but it converts to 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
    # (32-byte p2sh-p2wpkh output size + 148 p2pkh spend estimate) * 10k(discard_rate) / 1000 = 1800
# P2SH outputs are slightly "over-discarding" due to the IsDust calculation assuming it will
# be spent as a P2PKH.
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 50000 - 1800})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated
def test_settxfee(rbf_node, dest_address):
assert_raises_rpc_error(-8, "txfee cannot be less than min relay tx fee", rbf_node.settxfee, Decimal('0.000005'))
assert_raises_rpc_error(-8, "txfee cannot be less than wallet min fee", rbf_node.settxfee, Decimal('0.000015'))
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_maxtxfee_fails(test, rbf_node, dest_address):
test.restart_node(1, ['-maxtxfee=0.00003'] + test.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Specified or calculated fee 0.0000332 is too high (cannot be higher than maxTxFee 0.00003)", rbf_node.bumpfee, rbfid)
test.restart_node(1, test.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then invalidate the block so the rbf tx will be put back in the mempool.
# This makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
# Can not abandon conflicted tx
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
rbf_node.invalidateblock(block.hash)
# Call abandon to make sure the wallet doesn't attempt to resubmit
# the bump tx and hope the wallet does not rebroadcast before we call.
rbf_node.abandontransaction(bumpid)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.00050000"),
node.getrawchangeaddress(): Decimal("0.00049000")})
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time, version=0x20000000)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
add_witness_commitment(block)
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
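# Note (added): like upstream Bitcoin Core functional tests, this file can be run
# directly (BumpFeeTest().main() above) or through the functional test runner; in this
# fork run_test() raises SkipTest immediately because woodcoin does not support RBF,
# so the individual bumpfee cases defined below it are effectively disabled.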
| 46.862928
| 159
| 0.729642
|
6223630d4a799a63971a92e17bac7a84de647c98
| 6,888
|
py
|
Python
|
qiskit/aqua/components/variational_forms/ryrz.py
|
hushaohan/aqua
|
8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d
|
[
"Apache-2.0"
] | 1
|
2019-10-12T16:20:25.000Z
|
2019-10-12T16:20:25.000Z
|
qiskit/aqua/components/variational_forms/ryrz.py
|
hushaohan/aqua
|
8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/components/variational_forms/ryrz.py
|
hushaohan/aqua
|
8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d
|
[
"Apache-2.0"
] | 1
|
2022-01-25T07:09:10.000Z
|
2022-01-25T07:09:10.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Layers of Y+Z rotations followed by entangling gates."""
import warnings
from typing import Optional, List
import numpy as np
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.aqua.utils.validation import validate_min, validate_in_set
from qiskit.aqua.components.initial_states import InitialState
from .variational_form import VariationalForm
class RYRZ(VariationalForm):
r"""DEPRECATED. The RYRZ Variational Form.
The RYRZ trial wave function is layers of :math:`y` plus :math:`z` rotations with entanglements.
When none of qubits are unentangled to other qubits, the number of optimizer parameters this
form creates and uses is given by :math:`q \times (d + 1) \times 2`, where :math:`q` is the
total number of qubits and :math:`d` is the depth of the circuit.
    However, if an `entangler_map` does not include all qubits, that is, some
    qubits are not entangled with other qubits, the number of parameters is reduced by
:math:`d \times q' \times 2` where :math:`q'` is the number of unentangled qubits.
    This is because adding more parameters to the unentangled qubits only introduces overhead
without bringing any benefit; furthermore, theoretically, applying multiple Ry and Rz gates
in a row can be reduced to a single Ry gate and one Rz gate with the summed rotation angles.
See :class:`RY` for more detail on `entangler_map` and `entanglement` which apply here too
but note RYRZ only supports 'full' and 'linear' values.
"""
def __init__(self,
num_qubits: int,
depth: int = 3,
entangler_map: Optional[List[List[int]]] = None,
entanglement: str = 'full',
initial_state: Optional[InitialState] = None,
entanglement_gate: str = 'cz',
skip_unentangled_qubits: bool = False) -> None:
"""
Args:
num_qubits: Number of qubits, has a minimum value of 1.
depth: Number of rotation layers, has a minimum value of 1.
entangler_map: Describe the connectivity of qubits, each list pair describes
                [source, target], or None to use the connectivity defined by `entanglement`.
                Note that the order in the list is the order of applying the two-qubit gate.
            entanglement: ('full' | 'linear'), overridden by `entangler_map` if it is
provided. 'full' is all-to-all entanglement, 'linear' is nearest-neighbor.
initial_state: An initial state object
entanglement_gate: ('cz' | 'cx')
skip_unentangled_qubits: Skip the qubits not in the entangler_map
"""
warnings.warn('The qiskit.aqua.components.variational_forms.RYRZ object is deprecated as '
'of 0.7.0 and will be removed no sooner than 3 months after the release. You '
'should use qiskit.circuit.library.EfficientSU2 (uses CX entangling) or '
'qiskit.circuit.library.TwoLocal instead.',
DeprecationWarning, stacklevel=2)
validate_min('num_qubits', num_qubits, 1)
validate_min('depth', depth, 1)
validate_in_set('entanglement', entanglement, {'full', 'linear'})
validate_in_set('entanglement_gate', entanglement_gate, {'cz', 'cx'})
super().__init__()
self._num_qubits = num_qubits
self._depth = depth
if entangler_map is None:
self._entangler_map = VariationalForm.get_entangler_map(entanglement, num_qubits)
else:
self._entangler_map = VariationalForm.validate_entangler_map(entangler_map, num_qubits)
# determine the entangled qubits
all_qubits = []
for src, targ in self._entangler_map:
all_qubits.extend([src, targ])
self._entangled_qubits = sorted(list(set(all_qubits)))
self._initial_state = initial_state
self._entanglement_gate = entanglement_gate
self._skip_unentangled_qubits = skip_unentangled_qubits
# for the first layer
self._num_parameters = len(self._entangled_qubits) * 2 if self._skip_unentangled_qubits \
else self._num_qubits * 2
# for repeated block
self._num_parameters += len(self._entangled_qubits) * depth * 2
self._bounds = [(-np.pi, np.pi)] * self._num_parameters
self._support_parameterized_circuit = True
def construct_circuit(self, parameters, q=None):
"""
Construct the variational form, given its parameters.
Args:
parameters (Union(numpy.ndarray, list[Parameter], ParameterVector)): circuit parameters
q (QuantumRegister): Quantum Register for the circuit.
Returns:
QuantumCircuit: a quantum circuit with given `parameters`
Raises:
ValueError: the number of parameters is incorrect.
"""
if len(parameters) != self._num_parameters:
raise ValueError('The number of parameters has to be {}'.format(self._num_parameters))
if q is None:
q = QuantumRegister(self._num_qubits, name='q')
if self._initial_state is not None:
circuit = self._initial_state.construct_circuit('circuit', q)
else:
circuit = QuantumCircuit(q)
param_idx = 0
for qubit in range(self._num_qubits):
if not self._skip_unentangled_qubits or qubit in self._entangled_qubits:
circuit.u3(parameters[param_idx], 0.0, 0.0, q[qubit]) # ry
circuit.u1(parameters[param_idx + 1], q[qubit]) # rz
param_idx += 2
for _ in range(self._depth):
circuit.barrier(q)
for src, targ in self._entangler_map:
if self._entanglement_gate == 'cz':
circuit.u2(0.0, np.pi, q[targ]) # h
circuit.cx(q[src], q[targ])
circuit.u2(0.0, np.pi, q[targ]) # h
else:
circuit.cx(q[src], q[targ])
circuit.barrier(q)
for qubit in self._entangled_qubits:
circuit.u3(parameters[param_idx], 0.0, 0.0, q[qubit]) # ry
circuit.u1(parameters[param_idx + 1], q[qubit]) # rz
param_idx += 2
circuit.barrier(q)
return circuit
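if __name__ == '__main__':
    # Illustrative usage sketch (added, not part of the original file): with 3 qubits,
    # depth 2 and 'full' entanglement every qubit is entangled, so the form expects
    # 3 * (2 + 1) * 2 = 18 parameters, matching the count derived in the class docstring.
    form = RYRZ(num_qubits=3, depth=2)
    parameters = np.random.uniform(-np.pi, np.pi, form._num_parameters)  # 18 values
    print(form._num_parameters)
    print(form.construct_circuit(parameters))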
| 46.857143
| 100
| 0.643438
|
6303628c577532ca0eb474a550f4dfc0015f35b7
| 680
|
py
|
Python
|
Internetworking Distributed Project/cs558l_esha/lab5/leon_newfiles/server.py
|
supriyasingh01/github_basics
|
8aa93f783cfe347368763ef31be5ab59fe8476a1
|
[
"CC0-1.0"
] | null | null | null |
Internetworking Distributed Project/cs558l_esha/lab5/leon_newfiles/server.py
|
supriyasingh01/github_basics
|
8aa93f783cfe347368763ef31be5ab59fe8476a1
|
[
"CC0-1.0"
] | null | null | null |
Internetworking Distributed Project/cs558l_esha/lab5/leon_newfiles/server.py
|
supriyasingh01/github_basics
|
8aa93f783cfe347368763ef31be5ab59fe8476a1
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/python
from socket import *
from sys import argv
from thread import start_new_thread
from threads import *
import os
sock = socket(AF_INET, SOCK_DGRAM)
sock.bind(('',int(argv[1])))
port_num = int(argv[1])
num_of_servers = int(argv[2])
index=0
def fork_server(port_num,index):
child_pid = os.fork()
if child_pid==0:
print "Server waiting for connection on port ",port_num
message, client_addr = sock.recvfrom(256)
print "recieved message"
start_new_thread(server_sends_file, (sock, message, client_addr,port_num,index))
else:
print "This is a parent process"
for x in range(0, num_of_servers):
fork_server(port_num+x,x)
while(1):
1
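# Note (added): the trailing busy-loop only keeps the parent process alive after the
# per-port servers have been forked; each child waits for one UDP message and hands the
# transfer off to server_sends_file() from the project-local threads module.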
| 21.25
| 82
| 0.732353
|
e7af28de9c5e2f7ad8b2c109e12025c105d07104
| 1,149
|
py
|
Python
|
test/functional/rpc_deprecated.py
|
ocvcoin/ocvcoin
|
e816ac4a7dc57b00a5e085052a6bb3c94bf4959b
|
[
"MIT"
] | 2
|
2021-10-30T04:29:45.000Z
|
2021-12-11T16:49:57.000Z
|
test/functional/rpc_deprecated.py
|
ocvcoin/ocvcoin
|
e816ac4a7dc57b00a5e085052a6bb3c94bf4959b
|
[
"MIT"
] | 2
|
2021-09-13T22:46:42.000Z
|
2021-09-14T08:47:00.000Z
|
test/functional/rpc_deprecated.py
|
ocvcoin/ocvcoin
|
e816ac4a7dc57b00a5e085052a6bb3c94bf4959b
|
[
"MIT"
] | 2
|
2021-11-18T09:48:59.000Z
|
2021-12-23T04:33:02.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Ocvcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import OcvcoinTestFramework
class DeprecatedRpcTest(OcvcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ['-deprecatedrpc=bumpfee']]
def run_test(self):
# This test should be used to verify correct behaviour of deprecated
# RPC methods with and without the -deprecatedrpc flags. For example:
#
# In set_test_params:
# self.extra_args = [[], ["-deprecatedrpc=generate"]]
#
# In run_test:
# self.log.info("Test generate RPC")
# assert_raises_rpc_error(-32, 'The wallet generate rpc method is deprecated', self.nodes[0].rpc.generate, 1)
# self.nodes[1].generate(1)
self.log.info("No tested deprecated RPC methods")
if __name__ == '__main__':
DeprecatedRpcTest().main()
| 38.3
| 117
| 0.681462
|
60f82a7a10ba10217e98674f77196aee5fc61533
| 916
|
py
|
Python
|
solutions/257.binary-tree-paths.241426977.ac.py
|
satu0king/Leetcode-Solutions
|
2edff60d76c2898d912197044f6284efeeb34119
|
[
"MIT"
] | 78
|
2020-10-22T11:31:53.000Z
|
2022-02-22T13:27:49.000Z
|
solutions/257.binary-tree-paths.241426977.ac.py
|
satu0king/Leetcode-Solutions
|
2edff60d76c2898d912197044f6284efeeb34119
|
[
"MIT"
] | null | null | null |
solutions/257.binary-tree-paths.241426977.ac.py
|
satu0king/Leetcode-Solutions
|
2edff60d76c2898d912197044f6284efeeb34119
|
[
"MIT"
] | 26
|
2020-10-23T15:10:44.000Z
|
2021-11-07T16:13:50.000Z
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def binaryTreePaths(self, root):
"""
:type root: TreeNode
:rtype: List[str]
"""
ans = []
if root is None:
return ans
def f(node, path = ""):
if node is None:
return
if path:
path+="->" + str(node.val)
else:
path = str(node.val)
if node.left is None and node.right is None:
ans.append(path)
return
f(node.left, path)
f(node.right, path)
f(root)
        return ans
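if __name__ == "__main__":
    # Illustrative check (added, not part of the original solution): build the tree
    # 1 -> (2, 3) with 2 -> (None, 5); the expected output is ['1->2->5', '1->3'].
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.right = TreeNode(5)
    print(Solution().binaryTreePaths(root))  # ['1->2->5', '1->3']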
| 21.809524
| 56
| 0.387555
|
4abf0681894a39205994e0c2057243a87387221d
| 482
|
py
|
Python
|
2020/15b.py
|
combatopera/advent2020
|
6fa54e91ef1a5443dff36c15e65892701293201f
|
[
"Unlicense"
] | 2
|
2021-12-04T00:02:12.000Z
|
2021-12-11T05:38:45.000Z
|
2020/15b.py
|
combatopera/advent2020
|
6fa54e91ef1a5443dff36c15e65892701293201f
|
[
"Unlicense"
] | null | null | null |
2020/15b.py
|
combatopera/advent2020
|
6fa54e91ef1a5443dff36c15e65892701293201f
|
[
"Unlicense"
] | 1
|
2020-12-20T18:50:54.000Z
|
2020-12-20T18:50:54.000Z
|
#!/usr/bin/env python3
from pathlib import Path
totalturns = 30000000
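# Note (added for clarity): this implements the day-15 "memory game": each spoken
# number n is followed by the gap between its two most recent turns (t2[n] - t1[n]),
# or by 0 if n has only been spoken once before.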
def main():
t1 = {}
t2 = {}
turn = 1
with Path('input', '15').open() as f:
for n in map(int, f.read().split(',')):
t1[n] = t2.get(n)
t2[n] = turn
turn += 1
while turn <= totalturns:
n = t2[n] - t1[n] if t1.get(n) else 0
t1[n] = t2.get(n)
t2[n] = turn
turn += 1
print(n)
if '__main__' == __name__:
main()
| 19.28
| 47
| 0.46473
|
0276705251539727eed8c4ca6f88d24d150d8360
| 4,576
|
py
|
Python
|
omni_cv_rules/coconut/recursive.py
|
proboscis/omni-cv-rules
|
0a3d4763c0c50bc32974a16531b3d0a1d396c05a
|
[
"MIT"
] | 2
|
2022-01-28T13:37:08.000Z
|
2022-03-03T20:29:20.000Z
|
omni_cv_rules/coconut/recursive.py
|
proboscis/omni-cv-rules
|
0a3d4763c0c50bc32974a16531b3d0a1d396c05a
|
[
"MIT"
] | null | null | null |
omni_cv_rules/coconut/recursive.py
|
proboscis/omni-cv-rules
|
0a3d4763c0c50bc32974a16531b3d0a1d396c05a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# __coconut_hash__ = 0x4bbe2f00
# Compiled with Coconut version 1.6.0 [Vocational Guidance Counsellor]
# Coconut Header: -------------------------------------------------------------
from __future__ import generator_stop
import sys as _coconut_sys, os as _coconut_os
_coconut_file_dir = _coconut_os.path.dirname(_coconut_os.path.abspath(__file__))
_coconut_cached_module = _coconut_sys.modules.get("__coconut__")
if _coconut_cached_module is not None and _coconut_os.path.dirname(_coconut_cached_module.__file__) != _coconut_file_dir:
del _coconut_sys.modules["__coconut__"]
_coconut_sys.path.insert(0, _coconut_file_dir)
_coconut_module_name = _coconut_os.path.splitext(_coconut_os.path.basename(_coconut_file_dir))[0]
if _coconut_module_name and _coconut_module_name[0].isalpha() and all(c.isalpha() or c.isdigit() for c in _coconut_module_name) and "__init__.py" in _coconut_os.listdir(_coconut_file_dir):
_coconut_full_module_name = str(_coconut_module_name + ".__coconut__")
import __coconut__ as _coconut__coconut__
_coconut__coconut__.__name__ = _coconut_full_module_name
for _coconut_v in vars(_coconut__coconut__).values():
if getattr(_coconut_v, "__module__", None) == "__coconut__":
try:
_coconut_v.__module__ = _coconut_full_module_name
except AttributeError:
_coconut_v_type = type(_coconut_v)
if getattr(_coconut_v_type, "__module__", None) == "__coconut__":
_coconut_v_type.__module__ = _coconut_full_module_name
_coconut_sys.modules[_coconut_full_module_name] = _coconut__coconut__
from __coconut__ import *
from __coconut__ import _coconut_call_set_names, _coconut, _coconut_MatchError, _coconut_igetitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_forward_dubstar_compose, _coconut_back_dubstar_compose, _coconut_pipe, _coconut_star_pipe, _coconut_dubstar_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_back_dubstar_pipe, _coconut_none_pipe, _coconut_none_star_pipe, _coconut_none_dubstar_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial, _coconut_get_function_match_error, _coconut_base_pattern_func, _coconut_addpattern, _coconut_sentinel, _coconut_assert, _coconut_mark_as_match, _coconut_reiterable, _coconut_self_match_types, _coconut_dict_merge, _coconut_exec
_coconut_sys.path.pop(0)
# Compiled Coconut: -----------------------------------------------------------
from omni_cv_rules.coconut.omni_converter import AutoList # from omni_cv_rules.coconut.omni_converter import AutoList
def intra_list_conversions(state, neighbors): # def intra_list_conversions(state,neighbors):
_coconut_case_match_to_0 = state # case state:
_coconut_case_match_check_0 = False # case state:
if (_coconut.isinstance(_coconut_case_match_to_0, AutoList)) and (_coconut.len(_coconut_case_match_to_0) == 1) and (_coconut.isinstance(_coconut_case_match_to_0[0], AutoList)) and (_coconut.len(_coconut_case_match_to_0[0]) == 1): # case state:
_coconut_case_match_check_0 = True # case state:
if _coconut_case_match_check_0: # case state:
return ([]) # dont dive into nested list too much # return [] # dont dive into nested list too much
if not _coconut_case_match_check_0: # match AutoList(es):
_coconut_match_set_name_es = _coconut_sentinel # match AutoList(es):
if (_coconut.isinstance(_coconut_case_match_to_0, AutoList)) and (_coconut.len(_coconut_case_match_to_0) == 1): # match AutoList(es):
_coconut_match_set_name_es = _coconut_case_match_to_0[0] # match AutoList(es):
_coconut_case_match_check_0 = True # match AutoList(es):
if _coconut_case_match_check_0: # match AutoList(es):
if _coconut_match_set_name_es is not _coconut_sentinel: # match AutoList(es):
es = _coconut_case_match_to_0[0] # match AutoList(es):
if _coconut_case_match_check_0: # match AutoList(es):
return ([(lambda f, new_state, cost, name: (lambda items: [f(i) for i in items], AutoList(new_state), "[{_coconut_format_0}]".format(_coconut_format_0=(name)), cost + 1))(re.converter, re.new_format, re.cost, re.name) for re in neighbors(es)]) # return [((f,new_state,cost,name)->(
| 81.714286
| 834
| 0.73514
|
f0b79d423a8c240bd64dee60428cc489899956a0
| 1,669
|
py
|
Python
|
calculation/sha_calc/sha_calc/verification/im_correlations.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | null | null | null |
calculation/sha_calc/sha_calc/verification/im_correlations.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | 8
|
2021-10-13T02:33:23.000Z
|
2022-03-29T21:01:08.000Z
|
calculation/sha_calc/sha_calc/verification/im_correlations.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | null | null | null |
"""Produces a table of the correlations for non-pSA IM, and for
pSA produces period-based plots"""
import argparse
from typing import Sequence
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sha_calc import gcim
DEFAULT_PERIODS = np.logspace(np.log(0.01000001), np.log(10.0), base=np.e)
DEFAULT_PERIODS[-1] = 10.0
DEFAULT_IMS = ["PGA", "PGV", "CAV", "ASI", "DSI", "SI", "Ds575", "Ds595", "AI"]
def main(
output_dir: Path,
pSA_periods: Sequence[float] = DEFAULT_PERIODS,
ims: Sequence[str] = DEFAULT_IMS,
):
pSA_periods = np.sort(np.asarray(pSA_periods))
pSA_ims = np.char.add("pSA_", pSA_periods.astype(str))
# Generate the plots
for cur_im in ims:
cur_corr = [
gcim.get_im_correlations(cur_im, cur_pSA_im) for cur_pSA_im in pSA_ims
]
plt.figure()
plt.plot(DEFAULT_PERIODS, cur_corr)
plt.semilogx()
plt.xlabel("pSA period (s)")
plt.ylabel(r"$\rho$")
plt.title(f"Correlation of {cur_im} vs pSA")
plt.savefig(output_dir / f"{cur_im}_pSA_comparison.png")
plt.close()
# Generate the csv
im_correlations = [
[gcim.get_im_correlations(cur_im_i, cur_im_j) for cur_im_i in ims]
for cur_im_j in ims
]
im_correlations_df = pd.DataFrame(data=im_correlations, columns=ims, index=ims)
im_correlations_df.to_csv(output_dir / "IM_correlations.csv")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("output_dir", type=Path)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
main(args.output_dir)
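# Illustrative invocation note (added): the script is intended to be run from the
# command line with a single output-directory argument, e.g.
#   python im_correlations.py /path/to/output_dir   (hypothetical path)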
| 28.288136
| 83
| 0.672858
|
cb0d41622004cd107ae0b5920ce9b869b3606440
| 491
|
py
|
Python
|
generators/repeater.py
|
CodyKochmann/generators
|
a637bf9cb5e48251aa800753ba0aa79b3ca18dcf
|
[
"MIT"
] | 6
|
2017-12-21T04:32:35.000Z
|
2022-02-15T07:06:45.000Z
|
generators/repeater.py
|
CodyKochmann/generators
|
a637bf9cb5e48251aa800753ba0aa79b3ca18dcf
|
[
"MIT"
] | 21
|
2017-09-08T13:02:18.000Z
|
2020-03-28T19:10:01.000Z
|
generators/repeater.py
|
CodyKochmann/generators
|
a637bf9cb5e48251aa800753ba0aa79b3ca18dcf
|
[
"MIT"
] | 2
|
2018-09-30T16:16:10.000Z
|
2019-05-06T02:16:11.000Z
|
# -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2018-02-17 14:26:22
# @Last Modified by: Cody Kochmann
# @Last Modified time: 2018-02-17 14:38:45
def repeater(pipe, how_many=2):
''' this function repeats each value in the pipeline however many times you need '''
r = range(how_many)
for i in pipe:
for _ in r:
yield i
if __name__ == '__main__':
l = list(range(10))
print(l)
print(list(repeater(l)))
print(list(repeater(l, 3)))
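# Note (added): because repeater() is itself a generator, the repetition is lazy --
# values are yielded on demand and the input pipe is only consumed once.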
| 25.842105
| 88
| 0.610998
|
6693d7b1cddffe6331a458ab63e364f654cbcb90
| 10,245
|
py
|
Python
|
release/stubs.min/Rhino/Geometry/__init___parts/BoundingBox.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/Rhino/Geometry/__init___parts/BoundingBox.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/Rhino/Geometry/__init___parts/BoundingBox.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class BoundingBox(object):
"""
Represents the value of two points in a bounding box
defined by the two extreme corner points.
This box is therefore aligned to the world X,Y and Z axes.
BoundingBox(min: Point3d,max: Point3d)
BoundingBox(minX: float,minY: float,minZ: float,maxX: float,maxY: float,maxZ: float)
BoundingBox(points: IEnumerable[Point3d])
"""
def ClosestPoint(self,point,includeInterior=None):
"""
ClosestPoint(self: BoundingBox,point: Point3d,includeInterior: bool) -> Point3d
Finds the closest point on or in the boundingbox.
point: Sample point.
includeInterior: If false,the point is projected onto the boundary faces only,
otherwise the
interior of the box is also taken into consideration.
Returns: The point on or in the box that is closest to the sample point.
ClosestPoint(self: BoundingBox,point: Point3d) -> Point3d
Finds the closest point on or in the boundingbox.
point: Sample point.
Returns: The point on or in the box that is closest to the sample point.
"""
pass
def Contains(self,*__args):
"""
Contains(self: BoundingBox,box: BoundingBox) -> bool
Determines whether this boundingbox contains another boundingbox.
This is the same
as calling Contains(box,false).
box: Box to test.
Returns: true if the box is on the inside of this boundingbox,or is coincident with the surface of it.
Contains(self: BoundingBox,box: BoundingBox,strict: bool) -> bool
Determines whether this boundingbox contains another boundingbox.
The user can
  choose how to treat boundingboxes with coincident surfaces.
box: Box to test.
strict: If true,the box needs to be fully on the inside of the boundingbox.
I.e.
coincident boxes will be considered 'outside'.
Returns: true if the box is (strictly) on the inside of this BoundingBox.
Contains(self: BoundingBox,point: Point3d) -> bool
Tests a point for boundingbox inclusion. This is the same as calling Contains(point,false)
point: Point to test.
Returns: true if the point is on the inside of or coincident with this boundingbox; otherwise false.
Contains(self: BoundingBox,point: Point3d,strict: bool) -> bool
Tests a point for BoundingBox inclusion.
point: Point to test.
strict: If true,the point needs to be fully on the inside of the BoundingBox.
I.e.
coincident points will be considered 'outside'.
Returns: If 'strict' is affirmative,true if the point is inside this boundingbox; false if it is on the
surface or outside.If 'strict' is negative,true if the point is on the surface or on the inside
of the boundingbox; otherwise false.
"""
pass
def Corner(self,minX,minY,minZ):
"""
Corner(self: BoundingBox,minX: bool,minY: bool,minZ: bool) -> Point3d
Gets one of the eight corners of the box.
minX: true for the minimum on the X axis; false for the maximum.
minY: true for the minimum on the Y axis; false for the maximum.
minZ: true for the minimum on the Z axis; false for the maximum.
Returns: The requested point.
"""
pass
def FurthestPoint(self,point):
"""
FurthestPoint(self: BoundingBox,point: Point3d) -> Point3d
Finds the furthest point on the Box.
point: Sample point.
Returns: The point on the box that is furthest from the sample point.
"""
pass
def GetCorners(self):
"""
GetCorners(self: BoundingBox) -> Array[Point3d]
Gets an array filled with the 8 corner points of this box.
See remarks for the
return order.
Returns: An array of 8 corners.
"""
pass
def GetEdges(self):
"""
GetEdges(self: BoundingBox) -> Array[Line]
Gets an array of the 12 edges of this box.
Returns: If the boundingbox IsValid,the 12 edges; otherwise,null.
"""
pass
def Inflate(self,*__args):
"""
Inflate(self: BoundingBox,xAmount: float,yAmount: float,zAmount: float)
Inflate the box with custom amounts in all directions.
Inflating with negative
amounts may result in decreasing boxes.
InValid boxes can not be inflated.
xAmount: Amount (in model units) to inflate this box in the x direction.
yAmount: Amount (in model units) to inflate this box in the y direction.
zAmount: Amount (in model units) to inflate this box in the z direction.
Inflate(self: BoundingBox,amount: float)
Inflates the box with equal amounts in all directions.
Inflating with negative
amounts may result in decreasing boxes.
Invalid boxes can not be inflated.
amount: Amount (in model units) to inflate this box in all directions.
"""
pass
@staticmethod
def Intersection(a,b):
"""
Intersection(a: BoundingBox,b: BoundingBox) -> BoundingBox
Computes the intersection of two bounding boxes.
a: A first bounding box.
b: A second bounding box.
Returns: The intersection bounding box.
"""
pass
def IsDegenerate(self,tolerance):
"""
IsDegenerate(self: BoundingBox,tolerance: float) -> int
Determines whether a bounding box is degenerate (flat) in one or more directions.
tolerance: Distances <= tolerance will be considered to be zero. If tolerance
is negative
 (default),then a scale invariant tolerance is used.
Returns: 0=box is not degenerate
1=box is a rectangle (degenerate in one direction).
2=box is a line (degenerate in two directions).
3=box is a point
(degenerate in three directions)
4=box is not valid.
"""
pass
def MakeValid(self):
"""
MakeValid(self: BoundingBox) -> bool
Ensures that the box is defined in an increasing fashion along X,Y and Z axes.
If
the Min or Max points are unset,this function will not change the box.
Returns: true if the box was made valid,false if the box could not be made valid.
"""
pass
def PointAt(self,tx,ty,tz):
"""
PointAt(self: BoundingBox,tx: float,ty: float,tz: float) -> Point3d
Evaluates the boundingbox with normalized parameters.
The box has idealized side
length of 1x1x1.
tx: Normalized (between 0 and 1 is inside the box) parameter along the X direction.
ty: Normalized (between 0 and 1 is inside the box) parameter along the Y direction.
tz: Normalized (between 0 and 1 is inside the box) parameter along the Z direction.
Returns: The point at the {tx,ty,tz} parameters.
"""
pass
def ToBrep(self):
"""
ToBrep(self: BoundingBox) -> Brep
Constructs a Rhino.Geometry.Brep representation of this boundingbox.
      Returns: If this operation is successful,a Brep representation of this box; otherwise null.
"""
pass
def ToString(self):
"""
ToString(self: BoundingBox) -> str
Constructs the string representation of this aligned boundingbox.
Returns: Text.
"""
pass
def Transform(self,xform):
"""
Transform(self: BoundingBox,xform: Transform) -> bool
Updates this boundingbox to be the smallest axis aligned boundingbox that contains
the transformed result of its 8 original corner points.
xform: A transform.
Returns: true if this operation is successful; otherwise false.
"""
pass
def Union(self,*__args):
"""
Union(a: BoundingBox,b: BoundingBox) -> BoundingBox
Returns a new BoundingBox that represents the union of boxes a and b.
a: First box to include in union.
b: Second box to include in union.
Returns: The BoundingBox that contains both a and b.
Union(box: BoundingBox,point: Point3d) -> BoundingBox
Returns a new BoundingBox that represents the union of a bounding box and a point.
box: Box to include in the union.
point: Point to include in the union.
Returns: The BoundingBox that contains both the box and the point.
Union(self: BoundingBox,other: BoundingBox)
Updates this BoundingBox to represent the union of itself and another box.
other: Box to include in this union.
Union(self: BoundingBox,point: Point3d)
Updates this BoundingBox to represent the union of itself and a point.
point: Point to include in the union.
"""
pass
@staticmethod
def __new__(self,*__args):
"""
__new__[BoundingBox]() -> BoundingBox
__new__(cls: type,min: Point3d,max: Point3d)
__new__(cls: type,minX: float,minY: float,minZ: float,maxX: float,maxY: float,maxZ: float)
__new__(cls: type,points: IEnumerable[Point3d])
"""
pass
Center=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the point in the center of the boundingbox.
Get: Center(self: BoundingBox) -> Point3d
"""
Diagonal=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the diagonal vector of this BoundingBox.
The diagonal connects the Min and Max points.
Get: Diagonal(self: BoundingBox) -> Vector3d
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether or not this boundingbox is valid.
Empty boxes are not valid,and neither are boxes with unset points.
Get: IsValid(self: BoundingBox) -> bool
"""
Max=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the point in the maximal corner.
Get: Max(self: BoundingBox) -> Point3d
Set: Max(self: BoundingBox)=value
"""
Min=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the point in the minimal corner.
Get: Min(self: BoundingBox) -> Point3d
Set: Min(self: BoundingBox)=value
"""
Empty=None
Unset=None
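# --- Illustrative usage sketch (not part of the generated stub) ---
# Assumes a Rhino / IronPython environment where Rhino.Geometry is importable;
# only members documented in the stub above are used.
#
#   import Rhino.Geometry as rg
#   box = rg.BoundingBox(rg.Point3d(0, 0, 0), rg.Point3d(10, 5, 2))
#   box.Inflate(1.0)                       # grow by one unit in every direction
#   corner = box.Corner(True, True, True)  # the minimum corner
#   merged = rg.BoundingBox.Union(box, rg.Point3d(20, 20, 20))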
| 20.408367
| 108
| 0.659639
|
e58daf90b34601f9ca4eb28bbccd2f2d59bcc996
| 745
|
py
|
Python
|
first_sublime3_plugin/awesome_plugin.py
|
lomignet/thisdataguy_snippets
|
f893bba7cc37b02328459de84c4afcad7350727c
|
[
"BSD-2-Clause"
] | 13
|
2015-10-15T18:27:48.000Z
|
2022-01-31T13:14:50.000Z
|
first_sublime3_plugin/awesome_plugin.py
|
lomignet/thisdataguy_snippets
|
f893bba7cc37b02328459de84c4afcad7350727c
|
[
"BSD-2-Clause"
] | null | null | null |
first_sublime3_plugin/awesome_plugin.py
|
lomignet/thisdataguy_snippets
|
f893bba7cc37b02328459de84c4afcad7350727c
|
[
"BSD-2-Clause"
] | 16
|
2015-08-05T14:37:00.000Z
|
2021-07-15T11:52:51.000Z
|
# please always use shlex with subprocess
import shlex
import sublime
import sublime_plugin
import os
class UpdateOnSave(sublime_plugin.EventListener):
def on_post_save_async(self, view):
filename = view.file_name()
savedfile = os.path.basename(filename)
saveddir = os.path.dirname(filename)
# write in sublime status buffer
sublime.status_message('Manually saving ' + filename)
source_in_vagrant = '/vagrant/' + savedfile
dest_in_vagrant = '/project/' + savedfile
cmd_cp = "vagrant ssh -c 'sudo cp {0} {1}'".format(source, dest)
view.window().run_command('exec', {
'cmd': shlex.split(cmd_cp),
'working_dir': saveddir,
}
)
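# --- Illustrative note (not part of the original plugin) ---
# For a file saved as /home/user/project/foo.py, the command built above is roughly:
#   vagrant ssh -c 'sudo cp /vagrant/foo.py /project/foo.py'
# shlex.split keeps the single-quoted remote command together as one argument,
# which is why it is preferred over a naive str.split for subprocess-style calls.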
| 27.592593
| 72
| 0.640268
|
ca2920a43632b8cbbe239baaac296fd98e832d87
| 2,000
|
py
|
Python
|
src/neuron.py
|
drkostas/COSC525-Project1
|
d92a2bc89a30b7377085dc13aea0934851931179
|
[
"Apache-2.0"
] | null | null | null |
src/neuron.py
|
drkostas/COSC525-Project1
|
d92a2bc89a30b7377085dc13aea0934851931179
|
[
"Apache-2.0"
] | 5
|
2022-02-04T00:37:42.000Z
|
2022-02-18T18:56:37.000Z
|
src/neuron.py
|
drkostas/COSC525-Project1
|
d92a2bc89a30b7377085dc13aea0934851931179
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
class Neuron:
# ACT_FUNCTION, NUM_INPUTS, LEARNING_RATE, [INIT_WEIGHTS]
def __init__(self, activation: str, num_inputs: int, lr: float, weights: np.ndarray):
# Initializes all input vars
self.activation = activation
self.num_inputs = num_inputs
self.lr = lr
self.weights = weights
# Initialize all other object vars
self.output = None
self.inputs = None
self.net = None
self.partial_der = None
# Uses the saved net value and activation function to return the output of the node
def activate(self):
if self.activation == "linear":
self.output = self.net
elif self.activation == "logistic":
self.output = 1 / (1 + np.exp(-self.net))
return self.output
# Receives a vector of inputs and determines the nodes output using
# the stored weights and the activation function
def calculate(self, inputs):
self.inputs = np.append(inputs.copy(), [1])
self.net = np.sum(self.inputs * self.weights)
return self.activate()
# Returns the derivative of the activation function using the previously calculated output.
def activation_derivative(self):
if self.activation == "linear":
return 1
elif self.activation == "logistic":
return self.output * (1 - self.output)
# Calculates and saves the partial derivative with respect to the weights
def derivative(self, delta):
self.partial_der = np.array(self.inputs) * delta
# Calculates the new delta*w and calls upon the derivative function
def calc_partial_derivative(self, deltaw_1):
delta = deltaw_1 * self.activation_derivative()
self.derivative(delta)
return delta * self.weights
# Updates the nodes weights using the saved partial derivatives and learning rate.
def update_weights(self):
self.weights = self.weights - self.lr * self.partial_der
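# --- Illustrative usage sketch (not part of the original file) ---
# A single logistic neuron with two inputs plus a bias weight, pushed one
# gradient step toward a target of 1.0 (squared-error loss assumed).
if __name__ == "__main__":
    n = Neuron(activation="logistic", num_inputs=2, lr=0.5,
               weights=np.array([0.15, 0.20, 0.35]))  # last entry acts as the bias
    out = n.calculate(np.array([0.05, 0.10]))
    # For an output neuron, the incoming delta*w term is -(target - output).
    n.calc_partial_derivative(-(1.0 - out))
    n.update_weights()
    print(out, n.weights)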
| 37.037037
| 95
| 0.6575
|
53d4558d45bf128aa61d612cc89922cd83824618
| 4,467
|
py
|
Python
|
main.py
|
ppizarror/chansey-pygame-example
|
49b42547c0a1c5d743f84c2b5a473708a79fdab3
|
[
"MIT"
] | null | null | null |
main.py
|
ppizarror/chansey-pygame-example
|
49b42547c0a1c5d743f84c2b5a473708a79fdab3
|
[
"MIT"
] | null | null | null |
main.py
|
ppizarror/chansey-pygame-example
|
49b42547c0a1c5d743f84c2b5a473708a79fdab3
|
[
"MIT"
] | null | null | null |
# import libraries
import os
import random
import pygame
from pygame.locals import *
class Actor:
def __init__(self, dx, x, y, vida, size):
self.x = x
self.y = y
self.vida = vida
self.dx = dx
self.direccion = 0
self.initx = x
self.texture = pygame.image.load("res/actor.png")
self.muerto = False
def moverIzquierda(self):
if self.direccion != -1:
self.x = self.initx - self.dx
self.direccion = -1
def moverDerecha(self):
if self.direccion != 1:
self.x = self.initx + self.dx
self.direccion = 1
def moverCentro(self):
if self.direccion != 0:
self.x = self.initx
self.direccion = 0
def getImage(self):
return self.texture
def getX(self):
return self.x
def getY(self):
return self.y
def getPos(self):
return (self.x, self.y)
class Huevo:
def __init__(self, x, y, vel, ac, sound, dx, mono):
self.x = x + int(random.randint(-1, 1)) * dx
self.y = y
self.vel = vel
self.ac = ac
self.t = 0
self.texture = pygame.image.load("res/huevo.png")
self.sound = sound
self.mono = mono
def update(self, lista, i):
self.vel += self.ac * self.t
self.y = self.y + self.vel * self.t
self.t += 0.1
if self.y > 370:
lista.pop(i)
self.mono.vida -= 1
if self.mono.vida < 0:
self.mono.muerto = True
if abs(self.x - self.mono.getX()) < 40 and abs(self.y - self.mono.getY()) < 50:
try:
self.mono.vida += 1
self.sound.play()
lista.pop(i)
except: pass
def getPos(self):
return (self.x, self.y)
def getImage(self):
return self.texture
def main():
# Initialize the libraries
pygame.init()
# Define the screen object
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption("Chansey")
background = pygame.image.load("res/background.gif")
grass = pygame.image.load("res/grass.png")
background = pygame.transform.scale(background, (640, 480))
icon = pygame.image.load("res/icon.png")
pygame.display.set_icon(icon)
clock = pygame.time.Clock()
bgs = pygame.mixer.Sound("res/sound.wav")
bgs.play(-1)
sonidocomer = pygame.mixer.Sound("res/catch.wav")
scorefont = pygame.font.Font("res/font.ttf", 30)
fontmuerto = pygame.font.Font("res/font.ttf", 100)
# Create the game objects
lista_huevos = []
mono = Actor(120, 278, 370, 3, (100, 100))
dif = 5
acel = 0.01
while True:
if 1 <= random.randint(1, 100) <= dif:
h = Huevo(310, 0, 1, acel, sonidocomer, 120, mono)
lista_huevos.append(h)
clock.tick(30)
screen.blit(background, (0, 0))
screen.blit(grass, (0, 30))
for event in pygame.event.get():
if event.type == QUIT:
exit()
elif event.type == KEYDOWN:
if event.key == K_F12:
dif += 5
elif event.key == K_F11:
acel += 0.1
keys = pygame.key.get_pressed()
if keys[K_LEFT]:
mono.moverIzquierda()
elif keys[K_RIGHT]:
mono.moverDerecha()
else:
mono.moverCentro()
if not mono.muerto:
if len(lista_huevos) > 0:
j = 0
while True:
lista_huevos[j].update(lista_huevos, j)
j += 1
if j >= len(lista_huevos):
break
for i in lista_huevos:
screen.blit(i.getImage(), i.getPos())
screen.blit(mono.getImage(), mono.getPos())
f1 = scorefont.render("VIDAS " + str(mono.vida), 1, (0, 0, 0))
screen.blit(f1, (5, 5))
else:
f2 = fontmuerto.render("GAME OVER", 1, (0, 0, 0))
screen.blit(f2, (40, 170))
pygame.display.flip()
try: os.system("taskkill /PID " + str(os.getpid()) + " /F")
except: pass
if __name__ == "__main__":
main()
| 27.404908
| 88
| 0.497873
|
39bf981b99889e03cd78e5517e9ad85b7eb90907
| 1,172
|
py
|
Python
|
leetcode/0-250/249-509. Fibonacci Number.py
|
palash24/algorithms-and-data-structures
|
164be7d1a501a21af808673888964bbab36243a1
|
[
"MIT"
] | 23
|
2018-11-06T03:54:00.000Z
|
2022-03-14T13:30:40.000Z
|
leetcode/0-250/249-509. Fibonacci Number.py
|
palash24/algorithms-and-data-structures
|
164be7d1a501a21af808673888964bbab36243a1
|
[
"MIT"
] | null | null | null |
leetcode/0-250/249-509. Fibonacci Number.py
|
palash24/algorithms-and-data-structures
|
164be7d1a501a21af808673888964bbab36243a1
|
[
"MIT"
] | 5
|
2019-05-24T16:56:45.000Z
|
2022-03-10T17:29:10.000Z
|
# 509. Fibonacci Number
class Solution:
def fib2(self, N: int) -> int:
F = [0, 1]
for i in range(2, N + 1):
F.append(F[i - 1] + F[i - 2])
return F[N]
def fib3(self, N: int) -> int:
if N < 2: return N
prev, cur = 0, 1
for i in range(2, N + 1):
prev, cur = cur, prev + cur
return cur
def fib4(self, N: int) -> int:
if N < 2: return N
return self.fib(N - 1) + self.fib(N - 2)
F = [0] * 31
def fib5(self, N: int) -> int:
if N < 2: return N
prev = cur = 0
if self.F[N - 2]: prev = self.F[N - 2]
else:
prev = self.fib(N - 2)
self.F[N - 2] = prev
if self.F[N - 1]: cur = self.F[N - 1]
else:
cur = self.fib(N - 1)
self.F[N - 1] = cur
return prev + cur
memo = {}
def fib(self, N: int) -> int:
if N < 2: return N
if N - 1 not in self.memo: self.memo[N - 1] = self.fib(N - 1)
if N - 2 not in self.memo: self.memo[N - 2] = self.fib(N - 2)
return self.memo[N - 1] + self.memo[N - 2]
print(Solution().fib(7))
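# --- Illustrative check (not part of the original file) ---
# The iterative and memoized variants should agree, e.g. fib(10) == 55.
s = Solution()
assert s.fib2(10) == s.fib3(10) == s.fib(10) == 55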
| 26.044444
| 63
| 0.43686
|
6bee6a9dd366a02faf0339f9aa1cb6023bed3f38
| 373
|
py
|
Python
|
binance/__init__.py
|
tperalta82/python-binance
|
860250d334f9616a89f53b3ca2adecba4ee2b0f2
|
[
"MIT"
] | 1
|
2021-11-27T09:05:02.000Z
|
2021-11-27T09:05:02.000Z
|
binance/__init__.py
|
tperalta82/python-binance
|
860250d334f9616a89f53b3ca2adecba4ee2b0f2
|
[
"MIT"
] | null | null | null |
binance/__init__.py
|
tperalta82/python-binance
|
860250d334f9616a89f53b3ca2adecba4ee2b0f2
|
[
"MIT"
] | null | null | null |
"""An unofficial Python wrapper for the Binance exchange API v3
.. moduleauthor:: Sam McHardy
"""
__version__ = '1.0.10'
from binance.client import Client, AsyncClient # noqa
from binance.depthcache import DepthCacheManager, OptionsDepthCacheManager, ThreadedDepthCacheManager # noqa
from binance.streams import BinanceSocketManager, ThreadedWebsocketManager # noqa
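# --- Illustrative usage sketch (not part of the original package) ---
# Typical synchronous use of the wrapper; requires valid API credentials.
#
#   from binance import Client
#   client = Client(api_key='...', api_secret='...')
#   print(client.get_server_time())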
| 31.083333
| 109
| 0.806971
|
b72e21feb28b9f2a3734888fa4692bb3aac01244
| 9,666
|
py
|
Python
|
mkdocs/nav.py
|
akutz/mkdocs
|
0630c499081b69c8850e3b074cd6a05f15f27371
|
[
"BSD-2-Clause"
] | 1
|
2015-12-14T02:20:13.000Z
|
2015-12-14T02:20:13.000Z
|
mkdocs/nav.py
|
akutz/mkdocs
|
0630c499081b69c8850e3b074cd6a05f15f27371
|
[
"BSD-2-Clause"
] | null | null | null |
mkdocs/nav.py
|
akutz/mkdocs
|
0630c499081b69c8850e3b074cd6a05f15f27371
|
[
"BSD-2-Clause"
] | 2
|
2020-03-22T06:22:45.000Z
|
2021-10-02T08:41:18.000Z
|
# coding: utf-8
"""
Deals with generating the site-wide navigation.
This consists of building a set of interlinked page and header objects.
"""
from __future__ import unicode_literals
import datetime
import logging
import os
from mkdocs import utils, exceptions
log = logging.getLogger(__name__)
def filename_to_title(filename):
"""
Automatically generate a default title, given a filename.
"""
if utils.is_homepage(filename):
return 'Home'
return utils.filename_to_title(filename)
class SiteNavigation(object):
def __init__(self, pages_config, use_directory_urls=True):
self.url_context = URLContext()
self.file_context = FileContext()
self.nav_items, self.pages = _generate_site_navigation(
pages_config, self.url_context, use_directory_urls)
self.homepage = self.pages[0] if self.pages else None
self.use_directory_urls = use_directory_urls
def __str__(self):
return ''.join([str(item) for item in self])
def __iter__(self):
return iter(self.nav_items)
def walk_pages(self):
"""
Returns each page in the site in turn.
Additionally this sets the active status of the pages and headers,
in the site navigation, so that the rendered navbar can correctly
highlight the currently active page and/or header item.
"""
page = self.homepage
page.set_active()
self.url_context.set_current_url(page.abs_url)
self.file_context.set_current_path(page.input_path)
yield page
while page.next_page:
page.set_active(False)
page = page.next_page
page.set_active()
self.url_context.set_current_url(page.abs_url)
self.file_context.set_current_path(page.input_path)
yield page
page.set_active(False)
@property
def source_files(self):
if not hasattr(self, '_source_files'):
self._source_files = set([page.input_path for page in self.pages])
return self._source_files
class URLContext(object):
"""
The URLContext is used to ensure that we can generate the appropriate
relative URLs to other pages from any given page in the site.
We use relative URLs so that static sites can be deployed to any location
without having to specify what the path component on the host will be
if the documentation is not hosted at the root path.
"""
def __init__(self):
self.base_path = '/'
def set_current_url(self, current_url):
self.base_path = os.path.dirname(current_url)
def make_relative(self, url):
"""
Given a URL path return it as a relative URL,
given the context of the current page.
"""
suffix = '/' if (url.endswith('/') and len(url) > 1) else ''
# Workaround for bug on `os.path.relpath()` in Python 2.6
if self.base_path == '/':
if url == '/':
# Workaround for static assets
return '.'
return url.lstrip('/')
# Under Python 2.6, relative_path adds an extra '/' at the end.
relative_path = os.path.relpath(url, start=self.base_path)
relative_path = relative_path.rstrip('/') + suffix
return utils.path_to_url(relative_path)
class FileContext(object):
"""
The FileContext is used to ensure that we can generate the appropriate
full path for other pages given their relative path from a particular page.
This is used when we have relative hyperlinks in the documentation, so that
we can ensure that they point to markdown documents that actually exist
in the `pages` config.
"""
def __init__(self):
self.current_file = None
self.base_path = ''
def set_current_path(self, current_path):
self.current_file = current_path
self.base_path = os.path.dirname(current_path)
def make_absolute(self, path):
"""
Given a relative file path return it as a POSIX-style
absolute filepath, given the context of the current page.
"""
return os.path.normpath(os.path.join(self.base_path, path))
class Page(object):
def __init__(self, title, url, path, url_context):
self.title = title
self.abs_url = url
self.active = False
self.url_context = url_context
self.update_date = datetime.datetime.now().strftime("%Y-%m-%d")
# Relative paths to the input markdown file and output html file.
self.input_path = path
self.output_path = utils.get_html_path(path)
# Links to related pages
self.previous_page = None
self.next_page = None
self.ancestors = []
@property
def url(self):
return self.url_context.make_relative(self.abs_url)
@property
def is_homepage(self):
return utils.is_homepage(self.input_path)
@property
def is_top_level(self):
return len(self.ancestors) == 0
def __str__(self):
return self.indent_print()
def indent_print(self, depth=0):
indent = ' ' * depth
active_marker = ' [*]' if self.active else ''
title = self.title if (self.title is not None) else '[blank]'
return '%s%s - %s%s\n' % (indent, title, self.abs_url, active_marker)
def set_active(self, active=True):
self.active = active
for ancestor in self.ancestors:
ancestor.set_active(active)
class Header(object):
def __init__(self, title, children):
self.title, self.children = title, children
self.active = False
self.ancestors = []
def __str__(self):
return self.indent_print()
@property
def is_top_level(self):
return len(self.ancestors) == 0
def indent_print(self, depth=0):
indent = ' ' * depth
active_marker = ' [*]' if self.active else ''
ret = '%s%s%s\n' % (indent, self.title, active_marker)
for item in self.children:
ret += item.indent_print(depth + 1)
return ret
def set_active(self, active=True):
self.active = active
for ancestor in self.ancestors:
ancestor.set_active(active)
def _path_to_page(path, title, url_context, use_directory_urls):
if title is None:
title = filename_to_title(path.split(os.path.sep)[-1])
url = utils.get_url_path(path, use_directory_urls)
return Page(title=title, url=url, path=path,
url_context=url_context)
def _follow(config_line, url_context, use_dir_urls, header=None, title=None):
if isinstance(config_line, utils.string_types):
path = os.path.normpath(config_line)
page = _path_to_page(path, title, url_context, use_dir_urls)
if header:
page.ancestors = header.ancestors + [header, ]
header.children.append(page)
yield page
return
elif not isinstance(config_line, dict):
msg = ("Line in 'page' config is of type {0}, dict or string "
"expected. Config: {1}").format(type(config_line), config_line)
raise exceptions.ConfigurationError(msg)
if len(config_line) > 1:
raise exceptions.ConfigurationError(
"Page configs should be in the format 'name: markdown.md'. The "
"config contains an invalid entry: {0}".format(config_line))
elif len(config_line) == 0:
log.warning("Ignoring empty line in the pages config.")
return
next_cat_or_title, subpages_or_path = next(iter(config_line.items()))
if isinstance(subpages_or_path, utils.string_types):
path = subpages_or_path
for sub in _follow(path, url_context, use_dir_urls, header=header, title=next_cat_or_title):
yield sub
return
elif not isinstance(subpages_or_path, list):
msg = ("Line in 'page' config is of type {0}, list or string "
"expected for sub pages. Config: {1}"
).format(type(config_line), config_line)
raise exceptions.ConfigurationError(msg)
next_header = Header(title=next_cat_or_title, children=[])
if header:
next_header.ancestors = [header]
header.children.append(next_header)
yield next_header
subpages = subpages_or_path
for subpage in subpages:
for sub in _follow(subpage, url_context, use_dir_urls, next_header):
yield sub
def _generate_site_navigation(pages_config, url_context, use_dir_urls=True):
"""
Returns a list of Page and Header instances that represent the
top level site navigation.
"""
nav_items = []
pages = []
previous = None
for config_line in pages_config:
for page_or_header in _follow(
config_line, url_context, use_dir_urls):
if isinstance(page_or_header, Header):
if page_or_header.is_top_level:
nav_items.append(page_or_header)
elif isinstance(page_or_header, Page):
if page_or_header.is_top_level:
nav_items.append(page_or_header)
pages.append(page_or_header)
if previous:
page_or_header.previous_page = previous
previous.next_page = page_or_header
previous = page_or_header
if len(pages) == 0:
raise exceptions.ConfigurationError(
"No pages found in the pages config. "
"Remove it entirely to enable automatic page discovery.")
return (nav_items, pages)
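# --- Illustrative usage sketch (not part of the original module) ---
# URLContext turns site-absolute URLs into links relative to the page being
# rendered (requires mkdocs to be importable, as at the top of this module).
if __name__ == '__main__':
    ctx = URLContext()
    ctx.set_current_url('/about/index.html')
    print(ctx.make_relative('/css/style.css'))     # -> '../css/style.css'
    print(ctx.make_relative('/about/index.html'))  # -> 'index.html'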
| 31.796053
| 100
| 0.641941
|
59e6389fac674a564948fc3579754d06af901886
| 43,927
|
py
|
Python
|
astropy/visualization/wcsaxes/coordinate_helpers.py
|
MatiasRepetto/astropy
|
689f9d3b063145150149e592a879ee40af1fac06
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/visualization/wcsaxes/coordinate_helpers.py
|
MatiasRepetto/astropy
|
689f9d3b063145150149e592a879ee40af1fac06
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/visualization/wcsaxes/coordinate_helpers.py
|
MatiasRepetto/astropy
|
689f9d3b063145150149e592a879ee40af1fac06
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import warnings
import numpy as np
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from matplotlib import rcParams
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
from .frame import RectangularFrame1D, EllipticalFrame
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .ticks import Ticks
from .ticklabels import TickLabels
from .axislabels import AxisLabels
from .grid_paths import get_lon_lat_path, get_gridline_path
__all__ = ['CoordinateHelper']
# Matplotlib's gridlines use Line2D, but ours use PathPatch.
# Patches take a slightly different format of linestyle argument.
LINES_TO_PATCHES_LINESTYLE = {'-': 'solid',
'--': 'dashed',
'-.': 'dashdot',
':': 'dotted',
'none': 'none',
'None': 'none',
' ': 'none',
'': 'none'}
def wrap_angle_at(values, coord_wrap):
# On ARM processors, np.mod emits warnings if there are NaN values in the
# array, although this doesn't seem to happen on other processors.
with np.errstate(invalid='ignore'):
return np.mod(values - coord_wrap, 360.) - (360. - coord_wrap)
class CoordinateHelper:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
coord_wrap : float
The angle at which the longitude wraps (defaults to 360)
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
def __init__(self, parent_axes=None, parent_map=None, transform=None,
coord_index=None, coord_type='scalar', coord_unit=None,
coord_wrap=None, frame=None, format_unit=None, default_label=None):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.parent_map = parent_map
self.transform = transform
self.coord_index = coord_index
self.coord_unit = coord_unit
self._format_unit = format_unit
self.frame = frame
self.default_label = default_label or ''
self._auto_axislabel = True
# Disable auto label for elliptical frames as it puts labels in
# annoying places.
if issubclass(self.parent_axes.frame_class, EllipticalFrame):
self._auto_axislabel = False
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
self.ticks.display_minor_ticks(rcParams['xtick.minor.visible'])
self.minor_frequency = 5
# Initialize axis labels
self.axislabels = AxisLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
# Initialize container for the grid lines
self.grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
self.grid_lines_kwargs = {'visible': False,
'facecolor': 'none',
'edgecolor': rcParams['grid.color'],
'linestyle': LINES_TO_PATCHES_LINESTYLE[rcParams['grid.linestyle']],
'linewidth': rcParams['grid.linewidth'],
'alpha': rcParams['grid.alpha'],
'transform': self.parent_axes.transData}
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
if grid_type == 'lines' and not self.transform.has_inverse:
raise ValueError('The specified transform has no inverse, so the '
'grid cannot be drawn using grid_type=\'lines\'')
if grid_type is None:
grid_type = 'lines' if self.transform.has_inverse else 'contours'
if grid_type in ('lines', 'contours'):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if 'color' in kwargs:
kwargs['edgecolor'] = kwargs.pop('color')
self.grid_lines_kwargs.update(kwargs)
if self.grid_lines_kwargs['visible']:
if not draw_grid:
self.grid_lines_kwargs['visible'] = False
else:
self.grid_lines_kwargs['visible'] = True
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : float, optional
The value to wrap at for angular coordinates
"""
self.coord_type = coord_type
if coord_type == 'longitude' and coord_wrap is None:
self.coord_wrap = 360
elif coord_type != 'longitude' and coord_wrap is not None:
raise NotImplementedError('coord_wrap is not yet supported '
'for non-longitude coordinates')
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == 'scalar':
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ['longitude', 'latitude']:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(unit=self.coord_unit,
format_unit=self._format_unit)
else:
raise ValueError("coord_type should be one of 'scalar', 'longitude', or 'latitude'")
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or `~matplotlib.ticker.Formatter`
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter "
"instance")
def format_coord(self, value, format='auto'):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
The format to use - by default the formatting will be adjusted
depending on whether Matplotlib is using LaTeX or MathTex. To
get plain ASCII strings, use format='ascii'.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == 'longitude':
value = wrap_angle_at(value, self.coord_wrap)
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = fl.formatter(values=[value] * fl._unit, spacing=spacing, format=format)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
separator : str or tuple or None
The separator between numbers in sexagesimal representation. Can be
either a string or a tuple (or `None` for default).
"""
if not (self._formatter_locator.__class__ == AngleFormatterLocator):
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, (str, tuple)) or separator is None:
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string, a tuple, or None")
def set_format_unit(self, unit, decimal=None, show_decimal_unit=True):
"""
Set the unit for the major tick labels.
Parameters
----------
unit : :class:`~astropy.units.Unit`
The unit to which the tick labels should be converted.
decimal : bool, optional
Whether to use decimal formatting. By default this is `False`
for degrees or hours (which therefore use sexagesimal formatting)
and `True` for all other units.
show_decimal_unit : bool, optional
Whether to include units when in decimal mode.
"""
self._formatter_locator.format_unit = u.Unit(unit)
self._formatter_locator.decimal = decimal
self._formatter_locator.show_decimal_unit = show_decimal_unit
def get_format_unit(self):
"""
Get the unit for the major tick labels.
"""
return self._formatter_locator.format_unit
def set_ticks(self, values=None, spacing=None, number=None, size=None,
width=None, color=None, alpha=None, direction=None,
exclude_overlapping=None):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points
color : str or tuple, optional
A valid Matplotlib color for the ticks
alpha : float, optional
The alpha value (transparency) for the ticks.
direction : {'in','out'}, optional
Whether the ticks should point inwards or outwards.
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError("At most one of values, spacing, or number should "
"be specified")
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if width is not None:
self.ticks.set_linewidth(width)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
if direction is not None:
if direction in ('in', 'out'):
self.ticks.set_tick_out(direction == 'out')
else:
raise ValueError("direction should be 'in' or 'out'")
if exclude_overlapping is not None:
warnings.warn("exclude_overlapping= should be passed to "
"set_ticklabel instead of set_ticks",
AstropyDeprecationWarning)
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
def set_ticks_position(self, position):
"""
Set where ticks should appear
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
Should be a string containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be
shown on the left and bottom axis.
"""
self.ticks.set_visible_axes(position)
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide ticks
along this coordinate.
"""
self.ticks.set_visible(visible)
def set_ticklabel(self, color=None, size=None, pad=None,
exclude_overlapping=None, **kwargs):
"""
Set the visual properties for the tick labels.
Parameters
----------
size : float, optional
The size of the ticks labels in points
color : str or tuple, optional
A valid Matplotlib color for the tick labels
pad : float, optional
Distance in points between tick and label.
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap over each other.
kwargs
Other keyword arguments are passed to :class:`matplotlib.text.Text`.
"""
if size is not None:
self.ticklabels.set_size(size)
if color is not None:
self.ticklabels.set_color(color)
if pad is not None:
self.ticklabels.set_pad(pad)
if exclude_overlapping is not None:
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
tick labels to be shown on the left and bottom axis.
"""
self.ticklabels.set_visible_axes(position)
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide this
coordinate's tick labels.
"""
self.ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
fontdict = kwargs.pop('fontdict', None)
# NOTE: When using plt.xlabel/plt.ylabel, minpad can get set explicitly
# to None so we need to make sure that in that case we change to a
# default numerical value.
if minpad is None:
minpad = 1
self.axislabels.set_text(text)
self.axislabels.set_minpad(minpad)
self.axislabels.set(**kwargs)
if fontdict is not None:
self.axislabels.update(fontdict)
def get_axislabel(self):
"""
Get the text for the axis label
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_auto_axislabel(self, auto_label):
"""
Render default axis labels if no explicit label is provided.
Parameters
----------
auto_label : `bool`
`True` if default labels will be rendered.
"""
self._auto_axislabel = bool(auto_label)
def get_auto_axislabel(self):
"""
Render default axis labels if no explicit label is provided.
Returns
-------
auto_axislabel : `bool`
`True` if default labels will be rendered.
"""
return self._auto_axislabel
def _get_default_axislabel(self):
unit = self.get_format_unit() or self.coord_unit
if not unit or unit is u.one or self.coord_type in ('longitude', 'latitude'):
return f"{self.default_label}"
else:
return f"{self.default_label} [{unit:latex}]"
def set_axislabel_position(self, position):
"""
Set where axis labels should appear
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
axis label to be shown on the left and bottom axis.
"""
self.axislabels.set_visible_axes(position)
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self.axislabels.set_visibility_rule(rule)
def get_axislabel_visibility_rule(self, rule):
"""
Get the rule used to determine when the axis label is drawn.
"""
return self.axislabels.get_visibility_rule()
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw_grid(self, renderer):
renderer.open_group('grid lines')
self._update_ticks()
if self.grid_lines_kwargs['visible']:
if isinstance(self.frame, RectangularFrame1D):
self._update_grid_lines_1d()
else:
if self._grid_type == 'lines':
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == 'lines':
frame_patch = self.frame.patch
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(frame_patch)
p.draw(renderer)
elif self._grid is not None:
for line in self._grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group('grid lines')
def _draw_ticks(self, renderer, bboxes, ticklabels_bbox):
"""
Draw all ticks and ticklabels.
"""
renderer.open_group('ticks')
self.ticks.draw(renderer)
self.ticklabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
tick_out_size=self.ticks.out_size)
renderer.close_group('ticks')
def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, visible_ticks):
# Render the default axis label if no axis label is set.
if self._auto_axislabel and not self.get_axislabel():
self.set_axislabel(self._get_default_axislabel())
renderer.open_group('axis labels')
self.axislabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
coord_ticklabels_bbox=ticklabels_bbox[self],
ticks_locs=self.ticks.ticks_locs,
visible_ticks=visible_ticks)
renderer.close_group('axis labels')
def _update_ticks(self):
if self.coord_index is None:
return
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_map.get_coord_range()
# First find the ticks we want to show
tick_world_coordinates, self._fl_spacing = self.locator(*coord_range[self.coord_index])
if self.ticks.get_display_minor_ticks():
minor_ticks_w_coordinates = self._formatter_locator.minor_locator(self._fl_spacing, self.get_minor_frequency(), *coord_range[self.coord_index])
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
from . import conf
frame = self.frame.sample(conf.frame_boundary_samples)
self.ticks.clear()
self.ticklabels.clear()
self.lblinfo = []
self.lbl_world = []
# Look up parent axes' transform from data to figure coordinates.
#
# See:
# https://matplotlib.org/users/transforms_tutorial.html#the-transformation-pipeline
transData = self.parent_axes.transData
invertedTransLimits = transData.inverted()
for axis, spine in frame.items():
if not isinstance(self.frame, RectangularFrame1D):
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
pixel0 = spine.data
world0 = spine.world[:, self.coord_index]
with np.errstate(invalid='ignore'):
world0 = self.transform.transform(pixel0)[:, self.coord_index]
axes0 = transData.transform(pixel0)
# Advance 2 pixels in figure coordinates
pixel1 = axes0.copy()
pixel1[:, 0] += 2.0
pixel1 = invertedTransLimits.transform(pixel1)
with np.errstate(invalid='ignore'):
world1 = self.transform.transform(pixel1)[:, self.coord_index]
# Advance 2 pixels in figure coordinates
pixel2 = axes0.copy()
pixel2[:, 1] += 2.0 if self.frame.origin == 'lower' else -2.0
pixel2 = invertedTransLimits.transform(pixel2)
with np.errstate(invalid='ignore'):
world2 = self.transform.transform(pixel2)[:, self.coord_index]
dx = (world1 - world0)
dy = (world2 - world0)
# Rotate by 90 degrees
dx, dy = -dy, dx
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
dx *= self._coord_scale_to_deg
dy *= self._coord_scale_to_deg
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.)
dy = wrap_angle_at(dy, 180.)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack([spine.normal_angle, spine.normal_angle[-1]])
with np.errstate(invalid='ignore'):
reset = (((normal_angle_full - tick_angle) % 360 > 90.) &
((tick_angle - normal_angle_full) % 360 > 90.))
tick_angle[reset] -= 180.
else:
rotation = 90 if axis == 'b' else -90
tick_angle = np.zeros((conf.frame_boundary_samples,)) + rotation
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
w1 = w1 * self._coord_scale_to_deg
w2 = w2 * self._coord_scale_to_deg
w1 = wrap_angle_at(w1, self.coord_wrap)
w2 = wrap_angle_at(w2, self.coord_wrap)
with np.errstate(invalid='ignore'):
w1[w2 - w1 > 180.] += 360
w2[w1 - w2 > 180.] += 360
if self._coord_scale_to_deg is not None:
w1 = w1 / self._coord_scale_to_deg
w2 = w2 / self._coord_scale_to_deg
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)
if self.ticks.get_display_minor_ticks():
self._compute_ticks(minor_ticks_w_coordinates, spine, axis, w1,
w2, tick_angle, ticks='minor')
# format tick labels, add to scene
text = self.formatter(self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing)
for kwargs, txt in zip(self.lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _compute_ticks(self, tick_world_coordinates, spine, axis, w1, w2,
tick_angle, ticks='major'):
if self.coord_type == 'longitude':
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack([tick_world_coordinates_values,
tick_world_coordinates_values + 360])
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid='ignore'):
intersections = np.hstack([np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0]])
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.e-13, atol=1.e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (spine.data[imax, 0] - spine.data[imin, 0])
y_data_i = spine.data[imin, 1] + frac * (spine.data[imax, 1] - spine.data[imin, 1])
x_pix_i = spine.pixel[imin, 0] + frac * (spine.pixel[imax, 0] - spine.pixel[imin, 0])
y_pix_i = spine.pixel[imin, 1] + frac * (spine.pixel[imax, 1] - spine.pixel[imin, 1])
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.:
delta_angle -= 360.
elif delta_angle < -180.:
delta_angle += 360.
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap)
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == 'major':
self.ticks.add(axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self.lblinfo.append(dict(axis=axis,
pixel=(x_pix_i, y_pix_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac))
self.lbl_world.append(world)
else:
self.ticks.add_minor(minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
"""
self.ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self.minor_frequency
def set_minor_frequency(self, frequency):
"""
Set the frequency of minor ticks per major ticks.
Parameters
----------
frequency : int
The number of minor ticks per major ticks.
"""
self.minor_frequency = frequency
def _update_grid_lines_1d(self):
if self.coord_index is None:
return
x_ticks_pos = [a[0] for a in self.ticks.pixel['b']]
ymin, ymax = self.parent_axes.get_ylim()
self.grid_lines = []
for x_coord in x_ticks_pos:
pixel = [[x_coord, ymin], [x_coord, ymax]]
self.grid_lines.append(Path(pixel))
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
# and drawing contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
if self.coord_index is None:
return
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
n_coord = len(tick_world_coordinates_values)
from . import conf
n_samples = conf.grid_samples
xy_world = np.zeros((n_samples * n_coord, 2))
self.grid_lines = []
for iw, w in enumerate(tick_world_coordinates_values):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
if self.coord_index == 0:
xy_world[subset, 0] = np.repeat(w, n_samples)
xy_world[subset, 1] = np.linspace(coord_range[1][0], coord_range[1][1], n_samples)
else:
xy_world[subset, 0] = np.linspace(coord_range[0][0], coord_range[0][1], n_samples)
xy_world[subset, 1] = np.repeat(w, n_samples)
# We now convert all the world coordinates to pixel coordinates in a
# single go rather than doing this in the gridline to path conversion
# to fully benefit from vectorized coordinate transformations.
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
for iw in range(n_coord):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
self.grid_lines.append(self._get_gridline(xy_world[subset], pixel[subset], xy_world_round[subset]))
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == 'scalar':
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _clear_grid_contour(self):
if hasattr(self, '_grid') and self._grid:
for line in self._grid.collections:
line.remove()
def _update_grid_contour(self):
if self.coord_index is None:
return
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
from . import conf
res = conf.contour_grid_samples
x, y = np.meshgrid(np.linspace(xmin, xmax, res),
np.linspace(ymin, ymax, res))
pixel = np.array([x.ravel(), y.ravel()]).T
world = self.transform.transform(pixel)
field = world[:, self.coord_index].reshape(res, res).T
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
# tick_world_coordinates is a Quantity array and we only need its values
tick_world_coordinates_values = tick_world_coordinates.value
if self.coord_type == 'longitude':
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (tick_world_coordinates_values[0] + tick_world_coordinates_values[1])
field = wrap_angle_at(field, mid)
tick_world_coordinates_values = wrap_angle_at(tick_world_coordinates_values, mid)
# Replace wraps by NaN
with np.errstate(invalid='ignore'):
reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (np.abs(np.diff(field[:-1, :], axis=1)) > 180)
field[:-1, :-1][reset] = np.nan
field[1:, :-1][reset] = np.nan
field[:-1, 1:][reset] = np.nan
field[1:, 1:][reset] = np.nan
if len(tick_world_coordinates_values) > 0:
with np.errstate(invalid='ignore'):
self._grid = self.parent_axes.contour(x, y, field.transpose(), levels=np.sort(tick_world_coordinates_values))
else:
self._grid = None
def tick_params(self, which='both', **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
which : {'both', 'major', 'minor'}, optional
Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this will not work correctly if
the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this will not work
correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
# First do some sanity checking on the keyword arguments
# colors= is a fallback default for color and labelcolor
if 'colors' in kwargs:
if 'color' not in kwargs:
kwargs['color'] = kwargs['colors']
if 'labelcolor' not in kwargs:
kwargs['labelcolor'] = kwargs['colors']
# The only property that can be set *specifically* for minor ticks is
# the length. In future we could consider having a separate Ticks instance
# for minor ticks so that e.g. the color can be set separately.
if which == 'minor':
if len(set(kwargs) - {'length'}) > 0:
raise ValueError("When setting which='minor', the only "
"property that can be set at the moment is "
"'length' (the minor tick length)")
else:
if 'length' in kwargs:
self.ticks.set_minor_ticksize(kwargs['length'])
return
# At this point, we can now ignore the 'which' argument.
# Set the tick arguments
self.set_ticks(size=kwargs.get('length'),
width=kwargs.get('width'),
color=kwargs.get('color'),
direction=kwargs.get('direction'))
# Set the tick position
position = None
for arg in ('bottom', 'left', 'top', 'right'):
if arg in kwargs and position is None:
position = ''
if kwargs.get(arg):
position += arg[0]
if position is not None:
self.set_ticks_position(position)
# Set the tick label arguments.
self.set_ticklabel(color=kwargs.get('labelcolor'),
size=kwargs.get('labelsize'),
pad=kwargs.get('pad'))
# Set the tick label position
position = None
for arg in ('bottom', 'left', 'top', 'right'):
if 'label' + arg in kwargs and position is None:
position = ''
if kwargs.get('label' + arg):
position += arg[0]
if position is not None:
self.set_ticklabel_position(position)
# And the grid settings
if 'grid_color' in kwargs:
self.grid_lines_kwargs['edgecolor'] = kwargs['grid_color']
if 'grid_alpha' in kwargs:
self.grid_lines_kwargs['alpha'] = kwargs['grid_alpha']
if 'grid_linewidth' in kwargs:
self.grid_lines_kwargs['linewidth'] = kwargs['grid_linewidth']
if 'grid_linestyle' in kwargs:
if kwargs['grid_linestyle'] in LINES_TO_PATCHES_LINESTYLE:
self.grid_lines_kwargs['linestyle'] = LINES_TO_PATCHES_LINESTYLE[kwargs['grid_linestyle']]
else:
self.grid_lines_kwargs['linestyle'] = kwargs['grid_linestyle']
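# --- Illustrative usage sketch (not part of the original module) ---
# CoordinateHelper objects are normally obtained from a WCSAxes instance via
# ``ax.coords``; only methods defined in this class are used below.
#
#   import matplotlib.pyplot as plt
#   from astropy.wcs import WCS
#   ax = plt.subplot(projection=WCS(naxis=2))
#   lon, lat = ax.coords
#   lon.set_axislabel('x world coordinate')
#   lon.set_ticks(number=6, color='grey')
#   lon.set_ticklabel(size=8, exclude_overlapping=True)
#   lon.grid(color='grey', alpha=0.5, linestyle='dashed')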
| 39.969973
| 155
| 0.584629
|
58d3c8fce0a4bbaae8d926879f5196318a85d451
| 9,052
|
py
|
Python
|
flaxlight/server/start_service.py
|
Flax-Network/flax-light-wallet
|
1745850a28a47bbbc4b5f3d460f35b34b4ed4f25
|
[
"Apache-2.0"
] | 1
|
2021-12-02T14:38:11.000Z
|
2021-12-02T14:38:11.000Z
|
flaxlight/server/start_service.py
|
Flax-Network/flax-light-wallet
|
1745850a28a47bbbc4b5f3d460f35b34b4ed4f25
|
[
"Apache-2.0"
] | null | null | null |
flaxlight/server/start_service.py
|
Flax-Network/flax-light-wallet
|
1745850a28a47bbbc4b5f3d460f35b34b4ed4f25
|
[
"Apache-2.0"
] | 6
|
2021-11-21T00:38:27.000Z
|
2021-12-03T01:25:19.000Z
|
import asyncio
import os
import logging
import logging.config
import signal
from sys import platform
from typing import Any, Callable, List, Optional, Tuple
from flaxlight.daemon.server import singleton, service_launch_lock_path
from flaxlight.server.ssl_context import flaxlight_ssl_ca_paths, private_ssl_ca_paths
try:
import uvloop
except ImportError:
uvloop = None
from flaxlight.rpc.rpc_server import start_rpc_server
from flaxlight.server.outbound_message import NodeType
from flaxlight.server.server import FlaxServer
from flaxlight.server.upnp import UPnP
from flaxlight.types.peer_info import PeerInfo
from flaxlight.util.flaxlight_logging import initialize_logging
from flaxlight.util.config import load_config, load_config_cli
from flaxlight.util.setproctitle import setproctitle
from flaxlight.util.ints import uint16
from .reconnect_task import start_reconnect_task
# this is used to detect whether we are running in the main process or not, in
# signal handlers. We need to ignore signals in the sub processes.
main_pid: Optional[int] = None
class Service:
def __init__(
self,
root_path,
node: Any,
peer_api: Any,
node_type: NodeType,
advertised_port: int,
service_name: str,
network_id: str,
upnp_ports: List[int] = [],
server_listen_ports: List[int] = [],
connect_peers: List[PeerInfo] = [],
auth_connect_peers: bool = True,
on_connect_callback: Optional[Callable] = None,
rpc_info: Optional[Tuple[type, int]] = None,
parse_cli_args=True,
connect_to_daemon=True,
) -> None:
self.root_path = root_path
self.config = load_config(root_path, "config.yaml")
ping_interval = self.config.get("ping_interval")
self.self_hostname = self.config.get("self_hostname")
self.daemon_port = self.config.get("daemon_port")
assert ping_interval is not None
self._connect_to_daemon = connect_to_daemon
self._node_type = node_type
self._service_name = service_name
self._rpc_task: Optional[asyncio.Task] = None
self._rpc_close_task: Optional[asyncio.Task] = None
self._network_id: str = network_id
proctitle_name = f"flaxlight_{service_name}"
setproctitle(proctitle_name)
self._log = logging.getLogger(service_name)
if parse_cli_args:
service_config = load_config_cli(root_path, "config.yaml", service_name)
else:
service_config = load_config(root_path, "config.yaml", service_name)
initialize_logging(service_name, service_config["logging"], root_path)
self._rpc_info = rpc_info
private_ca_crt, private_ca_key = private_ssl_ca_paths(root_path, self.config)
flaxlight_ca_crt, flaxlight_ca_key = flaxlight_ssl_ca_paths(root_path, self.config)
inbound_rlp = self.config.get("inbound_rate_limit_percent")
outbound_rlp = self.config.get("outbound_rate_limit_percent")
assert inbound_rlp and outbound_rlp
self._server = FlaxServer(
advertised_port,
node,
peer_api,
node_type,
ping_interval,
network_id,
inbound_rlp,
outbound_rlp,
root_path,
service_config,
(private_ca_crt, private_ca_key),
(flaxlight_ca_crt, flaxlight_ca_key),
name=f"{service_name}_server",
)
f = getattr(node, "set_server", None)
if f:
f(self._server)
else:
self._log.warning(f"No set_server method for {service_name}")
self._connect_peers = connect_peers
self._auth_connect_peers = auth_connect_peers
self._upnp_ports = upnp_ports
self._server_listen_ports = server_listen_ports
self._api = peer_api
self._node = node
self._did_start = False
self._is_stopping = asyncio.Event()
self._stopped_by_rpc = False
self._on_connect_callback = on_connect_callback
self._advertised_port = advertised_port
self._reconnect_tasks: List[asyncio.Task] = []
self.upnp: Optional[UPnP] = None
async def start(self, **kwargs) -> None:
# we include `kwargs` as a hack for the wallet, which for some
# reason allows parameters to `_start`. This is serious BRAIN DAMAGE,
# and should be fixed at some point.
# TODO: move those parameters to `__init__`
if self._did_start:
return None
assert self.self_hostname is not None
assert self.daemon_port is not None
self._did_start = True
self._enable_signals()
await self._node._start(**kwargs)
for port in self._upnp_ports:
if self.upnp is None:
self.upnp = UPnP()
self.upnp.remap(port)
await self._server.start_server(self._on_connect_callback)
self._reconnect_tasks = [
start_reconnect_task(self._server, _, self._log, self._auth_connect_peers) for _ in self._connect_peers
]
self._log.info(f"Started {self._service_name} service on network_id: {self._network_id}")
self._rpc_close_task = None
if self._rpc_info:
rpc_api, rpc_port = self._rpc_info
self._rpc_task = asyncio.create_task(
start_rpc_server(
rpc_api(self._node),
self.self_hostname,
self.daemon_port,
uint16(rpc_port),
self.stop,
self.root_path,
self.config,
self._connect_to_daemon,
)
)
async def run(self) -> None:
lockfile = singleton(service_launch_lock_path(self.root_path, self._service_name))
if lockfile is None:
self._log.error(f"{self._service_name}: already running")
raise ValueError(f"{self._service_name}: already running")
await self.start()
await self.wait_closed()
def _enable_signals(self) -> None:
global main_pid
main_pid = os.getpid()
signal.signal(signal.SIGINT, self._accept_signal)
signal.signal(signal.SIGTERM, self._accept_signal)
if platform == "win32" or platform == "cygwin":
# pylint: disable=E1101
signal.signal(signal.SIGBREAK, self._accept_signal) # type: ignore
def _accept_signal(self, signal_number: int, stack_frame):
self._log.info(f"got signal {signal_number}")
# we only handle signals in the main process. In the ProcessPoolExecutor
# processes, we have to ignore them. We'll shut them down gracefully
# from the main process
global main_pid
if os.getpid() != main_pid:
return
self.stop()
def stop(self) -> None:
if not self._is_stopping.is_set():
self._is_stopping.set()
# start with UPnP, since this can take a while, we want it to happen
# in the background while shutting down everything else
for port in self._upnp_ports:
if self.upnp is not None:
self.upnp.release(port)
self._log.info("Cancelling reconnect task")
for _ in self._reconnect_tasks:
_.cancel()
self._log.info("Closing connections")
self._server.close_all()
self._node._close()
self._node._shut_down = True
self._log.info("Calling service stop callback")
if self._rpc_task is not None:
self._log.info("Closing RPC server")
async def close_rpc_server() -> None:
if self._rpc_task:
await (await self._rpc_task)()
self._rpc_close_task = asyncio.create_task(close_rpc_server())
async def wait_closed(self) -> None:
await self._is_stopping.wait()
self._log.info("Waiting for socket to be closed (if opened)")
self._log.info("Waiting for FlaxServer to be closed")
await self._server.await_closed()
if self._rpc_close_task:
self._log.info("Waiting for RPC server")
await self._rpc_close_task
self._log.info("Closed RPC server")
self._log.info("Waiting for service _await_closed callback")
await self._node._await_closed()
if self.upnp is not None:
# this is a blocking call, waiting for the UPnP thread to exit
self.upnp.shutdown()
self._log.info(f"Service {self._service_name} at port {self._advertised_port} fully closed")
async def async_run_service(*args, **kwargs) -> None:
service = Service(*args, **kwargs)
return await service.run()
def run_service(*args, **kwargs) -> None:
if uvloop is not None:
uvloop.install()
return asyncio.run(async_run_service(*args, **kwargs))
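# Hedged usage sketch (not part of the original module): run_service() above
# simply installs uvloop when available and hands one coroutine to
# asyncio.run(). The standalone pattern, with my_service() standing in for
# async_run_service(), looks like this; every name below is illustrative.
if __name__ == "__main__":
    import asyncio

    try:
        import uvloop  # optional accelerator; it is fine if it is missing
    except ImportError:
        uvloop = None

    async def my_service() -> None:
        # placeholder for Service(...).run()
        await asyncio.sleep(0)

    if uvloop is not None:
        uvloop.install()       # swap in the faster event-loop policy
    asyncio.run(my_service())  # single blocking entry point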
| 35.359375
| 115
| 0.638202
|
ed4e563536cbee8441a79dd705406095fe0941b7
| 373
|
py
|
Python
|
allennlp_models/version.py
|
zhiyangxu-umass/allennlp-models
|
d922f7a8075387ebed1a3e38e588345f706d3f02
|
[
"Apache-2.0"
] | null | null | null |
allennlp_models/version.py
|
zhiyangxu-umass/allennlp-models
|
d922f7a8075387ebed1a3e38e588345f706d3f02
|
[
"Apache-2.0"
] | 11
|
2021-11-29T13:23:10.000Z
|
2022-03-28T13:24:20.000Z
|
allennlp_models/version.py
|
zhiyangxu-umass/allennlp-models
|
d922f7a8075387ebed1a3e38e588345f706d3f02
|
[
"Apache-2.0"
] | null | null | null |
import os
_MAJOR = "2"
_MINOR = "6"
_PATCH = "0"
# This is mainly for nightly builds which have the suffix ".dev$DATE". See
# https://semver.org/#is-v123-a-semantic-version for the semantics.
_SUFFIX = os.environ.get("ALLENNLP_MODELS_VERSION_SUFFIX", "")
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
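# Hedged sketch (illustrative only): how a nightly suffix changes the strings
# above. The ".dev20211103" value is an assumed example, not a real release.
if __name__ == "__main__":
    demo_suffix = ".dev20211103"
    print("{0}.{1}".format(_MAJOR, _MINOR))                              # -> 2.6
    print("{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, demo_suffix))  # -> 2.6.0.dev20211103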
| 31.083333
| 74
| 0.697051
|
43a368050e10ced68bb3353ae020282f95a61f4f
| 47,073
|
py
|
Python
|
python35/Lib/site-packages/sklearn/decomposition/nmf.py
|
Matchoc/python_env
|
859d84d1717a265a4085ad29706b12c19c62d36f
|
[
"Apache-2.0"
] | null | null | null |
python35/Lib/site-packages/sklearn/decomposition/nmf.py
|
Matchoc/python_env
|
859d84d1717a265a4085ad29706b12c19c62d36f
|
[
"Apache-2.0"
] | null | null | null |
python35/Lib/site-packages/sklearn/decomposition/nmf.py
|
Matchoc/python_env
|
859d84d1717a265a4085ad29706b12c19c62d36f
|
[
"Apache-2.0"
] | 1
|
2020-05-07T11:04:14.000Z
|
2020-05-07T11:04:14.000Z
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck
# Mathieu Blondel <mathieu@mblondel.org>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
INTEGER_TYPES = (numbers.Integral, np.integer)
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
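# Hedged sketch (not part of upstream scikit-learn): Hoyer sparseness is 0 for
# a flat vector and 1 when all mass sits on a single coordinate.
if __name__ == "__main__":
    print(_sparseness(np.ones(10)))                 # -> 0.0
    print(_sparseness(np.array([1., 0., 0., 0.])))  # -> 1.0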
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
"""Frobenius norm between X and WH, safe for sparse array"""
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(norm_X + norm_WH - 2. * cross_prod)
return error
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
eps : float
        Truncate all values less than this in output to zero.
random_state : int seed, RandomState instance, or None (default)
Random number generator seed control, used in 'nndsvdar' and
'random' modes.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
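# Hedged usage sketch for _initialize_nmf (illustrative, not upstream code):
# the 'nndsvd' branch returns factors whose product already tracks X roughly,
# which is why it is the default whenever n_components < n_features.
if __name__ == "__main__":
    rng_demo = np.random.RandomState(0)
    X_demo = np.abs(rng_demo.randn(6, 4))
    W_demo, H_demo = _initialize_nmf(X_demo, n_components=2, init='nndsvd',
                                     random_state=0)
    print(W_demo.shape, H_demo.shape)                       # (6, 2) (2, 4)
    print(np.linalg.norm(X_demo - np.dot(W_demo, H_demo)))  # already small-ish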
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
V : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
        Larger values allow finding a better step size but lead to a longer
        line search. 0.1 is a commonly used value in the optimization
        literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
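# Hedged sketch (illustrative): _nls_subproblem solves for H with W held
# fixed, so recovering a planted factorization is a quick sanity check.
if __name__ == "__main__":
    rng_demo = np.random.RandomState(0)
    W_demo = np.abs(rng_demo.randn(8, 3))
    V_demo = np.dot(W_demo, np.abs(rng_demo.randn(3, 5)))
    H_init = np.abs(rng_demo.randn(3, 5))
    H_sol, grad_demo, n_it = _nls_subproblem(V_demo, W_demo, H_init,
                                             tol=1e-4, max_iter=200)
    print(n_it, np.linalg.norm(V_demo - np.dot(W_demo, H_sol)))  # residual ~ 0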
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = H.shape[0]
if sparseness is None:
Wt, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(beta) * np.ones((1,
n_components_))]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(eta) * np.eye(n_components_)]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = W.shape[1]
if sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((n_components_, n_features))]),
safe_vstack([W,
np.sqrt(eta) * np.eye(n_components_)]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W, np.sqrt(beta) * np.ones((1, n_components_))]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return H, gradH, iterH
def _fit_projected_gradient(X, W, H, tol, max_iter,
nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Compute Non-negative Matrix Factorization (NMF) with Projected Gradient
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
Journal of Machine Learning Research 2004.
"""
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition
# as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
alpha, l1_ratio, sparseness,
beta, eta)
return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = fast_dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
l1_ratio=0., regularization=None, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
update_H : boolean, default: True
        If True, both W and H will be estimated from initial guesses.
        If False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
# L1 and L2 regularization
l1_H, l2_H, l1_W, l2_W = 0, 0, 0, 0
if regularization in ('both', 'components'):
alpha = float(alpha)
l1_H = l1_ratio * alpha
l2_H = (1. - l1_ratio) * alpha
if regularization in ('both', 'transformation'):
alpha = float(alpha)
l1_W = l1_ratio * alpha
l2_W = (1. - l1_ratio) * alpha
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_W, l2_W,
shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_H, l2_H,
shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False, nls_max_iter=2000,
sparseness=None, beta=1, eta=0.1):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
update_H : boolean, default: True
        If True, both W and H will be estimated from initial guesses.
        If False, only W will be estimated.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
_check_string_param(sparseness, solver)
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, INTEGER_TYPES) or n_components <= 0:
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, INTEGER_TYPES) or max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive integer;"
" got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
if solver == 'pg':
warnings.warn("'pg' solver will be removed in release 0.19."
" Use 'cd' solver instead.", DeprecationWarning)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
max_iter,
nls_max_iter,
alpha, l1_ratio,
sparseness,
beta, eta)
else: # transform
W, H, n_iter = _update_projected_gradient_w(X, W, H,
tol, nls_max_iter,
alpha, l1_ratio,
sparseness, beta,
eta)
elif solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
max_iter,
alpha, l1_ratio,
regularization,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
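# Hedged sketch (illustrative): evaluating the regularized objective from the
# docstring above on the factors returned by non_negative_factorization. The
# alpha / l1_ratio values are arbitrary demo choices.
if __name__ == "__main__":
    rng_demo = np.random.RandomState(0)
    X_demo = np.abs(rng_demo.randn(5, 4))
    W_demo, H_demo, _ = non_negative_factorization(X_demo, n_components=2,
                                                   random_state=0)
    alpha_demo, l1_ratio_demo = 0.1, 0.5
    obj = (0.5 * np.sum((X_demo - np.dot(W_demo, H_demo)) ** 2)
           + alpha_demo * l1_ratio_demo * (np.abs(W_demo).sum()
                                           + np.abs(H_demo).sum())
           + 0.5 * alpha_demo * (1 - l1_ratio_demo)
           * (np.sum(W_demo ** 2) + np.sum(H_demo ** 2)))
    print(obj)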
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, init=None, solver='cd',
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0, shuffle=False,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
if sparseness is not None:
warnings.warn("Controlling regularization through the sparseness,"
" beta and eta arguments is only available"
" for 'pg' solver, which will be removed"
" in release 0.19. Use another solver with L1 or L2"
" regularization instead.", DeprecationWarning)
self.nls_max_iter = nls_max_iter
self.sparseness = sparseness
self.beta = beta
self.eta = eta
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components,
init=self.init, update_H=True, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
if self.solver == 'pg':
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
self.reconstruction_err_ = _safe_compute_error(X, W, H)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Attributes
----------
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
self.n_iter_ = n_iter_
return W
def inverse_transform(self, W):
"""Transform data back to its original space.
Parameters
----------
W: {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed data matrix
Returns
-------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18
"""
check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
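# Hedged usage sketch for the estimator above (illustrative values): fit on a
# tiny matrix, project it, then map back with inverse_transform to get the
# low-rank reconstruction np.dot(W, components_).
if __name__ == "__main__":
    X_demo = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
    model_demo = NMF(n_components=2, init='random', random_state=0)
    W_demo = model_demo.fit_transform(X_demo)
    X_back = model_demo.inverse_transform(W_demo)
    print(np.abs(X_demo - X_back).max())  # small reconstruction error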
@deprecated("It will be removed in release 0.19. Use NMF instead."
"'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
super(ProjectedGradientNMF, self).__init__(
n_components=n_components, init=init, solver='pg', tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
sparseness=sparseness, beta=beta, eta=eta)
| 35.906178
| 83
| 0.598538
|
44cad153bebb635da9c2efbc583cb4111be3a953
| 7,405
|
py
|
Python
|
nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnglobal_intranetip_binding.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnglobal_intranetip_binding.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnglobal_intranetip_binding.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnglobal_intranetip_binding(base_resource) :
""" Binding class showing the intranetip that can be bound to vpnglobal.
"""
def __init__(self) :
self._intranetip = None
self._netmask = None
self._gotopriorityexpression = None
self.___count = None
@property
def intranetip(self) :
r"""The intranet ip address or range.
"""
try :
return self._intranetip
except Exception as e:
raise e
@intranetip.setter
def intranetip(self, intranetip) :
r"""The intranet ip address or range.
"""
try :
self._intranetip = intranetip
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
r"""Applicable only to advance vpn session policy. An expression or other value specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
r"""Applicable only to advance vpn session policy. An expression or other value specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def netmask(self) :
r"""The intranet ip address or range's netmask.
"""
try :
return self._netmask
except Exception as e:
raise e
@netmask.setter
def netmask(self, netmask) :
r"""The intranet ip address or range's netmask.
"""
try :
self._netmask = netmask
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnglobal_intranetip_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnglobal_intranetip_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
r""" Use this function to create a resource with only add operation specific parameters.
"""
addresource = vpnglobal_intranetip_binding()
addresource.intranetip = resource.intranetip
addresource.netmask = resource.netmask
addresource.gotopriorityexpression = resource.gotopriorityexpression
return addresource
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = cls.filter_add_parameters(resource)
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vpnglobal_intranetip_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_add_parameters(resource[i])
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
r""" Use this function to create a resource with only delete operation specific parameters.
"""
deleteresource = vpnglobal_intranetip_binding()
deleteresource.intranetip = resource.intranetip
deleteresource.netmask = resource.netmask
return deleteresource
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vpnglobal_intranetip_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i] = cls.filter_delete_parameters(resource[i])
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
r""" Use this API to fetch a vpnglobal_intranetip_binding resources.
"""
try :
obj = vpnglobal_intranetip_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
r""" Use this API to fetch filtered set of vpnglobal_intranetip_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vpnglobal_intranetip_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
r""" Use this API to count vpnglobal_intranetip_binding resources configued on NetScaler.
"""
try :
obj = vpnglobal_intranetip_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
r""" Use this API to count the filtered set of vpnglobal_intranetip_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = vpnglobal_intranetip_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Crlcheck:
Mandatory = "Mandatory"
Optional = "Optional"
class Ocspcheck:
Mandatory = "Mandatory"
Optional = "Optional"
class vpnglobal_intranetip_binding_response(base_response) :
def __init__(self, length=1) :
self.vpnglobal_intranetip_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnglobal_intranetip_binding = [vpnglobal_intranetip_binding() for _ in range(length)]
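# Hedged usage sketch (not part of the generated SDK file): listing and
# counting vpnglobal_intranetip_binding resources through a nitro_service
# session. The address and credentials below are illustrative assumptions.
if __name__ == "__main__":
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service

    client = nitro_service("10.0.0.1", "http")  # hypothetical NetScaler address
    client.login("nsroot", "nsroot")
    print(vpnglobal_intranetip_binding.count(client))
    for binding in (vpnglobal_intranetip_binding.get(client) or []):
        print(binding.intranetip, binding.netmask)
    client.logout()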
| 31.244726
| 195
| 0.739905
|
236a45dba11515bc6349a011fdaa098af792ca8e
| 4,063
|
py
|
Python
|
runtime/tests/automl_runtime/forecast/utils_test.py
|
wenfeiy-db/automl
|
f63e259b0db0a19ca87c3ea851ab749e3ab57c3b
|
[
"Apache-2.0"
] | null | null | null |
runtime/tests/automl_runtime/forecast/utils_test.py
|
wenfeiy-db/automl
|
f63e259b0db0a19ca87c3ea851ab749e3ab57c3b
|
[
"Apache-2.0"
] | null | null | null |
runtime/tests/automl_runtime/forecast/utils_test.py
|
wenfeiy-db/automl
|
f63e259b0db0a19ca87c3ea851ab749e3ab57c3b
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) 2022 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import pandas as pd
from databricks.automl_runtime.forecast.utils import generate_cutoffs
class TestGenerateCutoffs(unittest.TestCase):
def setUp(self) -> None:
self.X = pd.DataFrame(
pd.date_range(start="2020-07-01", end="2020-08-30", freq='d'), columns=["ds"]
).rename_axis("y").reset_index()
def test_generate_cutoffs_success(self):
cutoffs = generate_cutoffs(self.X, horizon=7, unit="d", seasonal_period=7, num_folds=3)
self.assertEqual([pd.Timestamp('2020-08-19 12:00:00'), pd.Timestamp('2020-08-23 00:00:00')], cutoffs)
def test_generate_cutoffs_success_large_num_folds(self):
cutoffs = generate_cutoffs(self.X, horizon=7, unit="d", seasonal_period=1, num_folds=20)
self.assertEqual([pd.Timestamp('2020-07-22 12:00:00'),
pd.Timestamp('2020-07-26 00:00:00'),
pd.Timestamp('2020-07-29 12:00:00'),
pd.Timestamp('2020-08-02 00:00:00'),
pd.Timestamp('2020-08-05 12:00:00'),
pd.Timestamp('2020-08-09 00:00:00'),
pd.Timestamp('2020-08-12 12:00:00'),
pd.Timestamp('2020-08-16 00:00:00'),
pd.Timestamp('2020-08-19 12:00:00'),
pd.Timestamp('2020-08-23 00:00:00')], cutoffs)
def test_generate_cutoffs_success_with_gaps(self):
df = pd.DataFrame(
pd.date_range(start="2020-07-01", periods=30, freq='3d'), columns=["ds"]
).rename_axis("y").reset_index()
cutoffs = generate_cutoffs(df, horizon=1, unit="d", seasonal_period=1, num_folds=5)
self.assertEqual([pd.Timestamp('2020-09-16 00:00:00'),
pd.Timestamp('2020-09-19 00:00:00'),
pd.Timestamp('2020-09-22 00:00:00'),
pd.Timestamp('2020-09-25 00:00:00')], cutoffs)
def test_generate_cutoffs_success_hourly(self):
df = pd.DataFrame(
pd.date_range(start="2020-07-01", periods=168, freq='h'), columns=["ds"]
).rename_axis("y").reset_index()
cutoffs = generate_cutoffs(df, horizon=6, unit="h", seasonal_period=24, num_folds=5)
self.assertEqual([pd.Timestamp('2020-07-07 08:00:00'),
pd.Timestamp('2020-07-07 11:00:00'),
pd.Timestamp('2020-07-07 14:00:00'),
pd.Timestamp('2020-07-07 17:00:00')], cutoffs)
def test_generate_cutoffs_success_weekly(self):
df = pd.DataFrame(
pd.date_range(start="2020-07-01", periods=52, freq='W'), columns=["ds"]
).rename_axis("y").reset_index()
cutoffs = generate_cutoffs(df, horizon=4, unit="W", seasonal_period=1, num_folds=3)
self.assertEqual([pd.Timestamp('2021-05-16 00:00:00'), pd.Timestamp('2021-05-30 00:00:00')], cutoffs)
def test_generate_cutoffs_failure_horizon_too_large(self):
with self.assertRaisesRegex(ValueError, "Less data than horizon after initial window. "
"Make horizon shorter."):
generate_cutoffs(self.X, horizon=20, unit="d", seasonal_period=1, num_folds=3)
def test_generate_cutoffs_less_data(self):
with self.assertRaisesRegex(ValueError, "Less data than horizon."):
generate_cutoffs(self.X, horizon=100, unit="days", seasonal_period=1, num_folds=3)
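# Hedged sketch (assumption, not part of the original file): these TestCase
# methods are normally collected by a test runner; a direct invocation would
# use the standard unittest entry point.
if __name__ == "__main__":
    unittest.main()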
| 48.951807
| 109
| 0.61654
|
5d12e549c7600e4e25ac11cfddf83d7f92d7f6e7
| 5,941
|
py
|
Python
|
venv/lib/python2.7/site-packages/flask_admin/form/fields.py
|
MarioAer/BubblesData
|
849cc6428b5e8d64f5517f94a714e3f737bfc75d
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/flask_admin/form/fields.py
|
MarioAer/BubblesData
|
849cc6428b5e8d64f5517f94a714e3f737bfc75d
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/flask_admin/form/fields.py
|
MarioAer/BubblesData
|
849cc6428b5e8d64f5517f94a714e3f737bfc75d
|
[
"MIT"
] | null | null | null |
import time
import datetime
from wtforms import fields, widgets
from flask_admin.babel import gettext
from flask_admin._compat import text_type, as_unicode
from . import widgets as admin_widgets
"""
An understanding of WTForms's Custom Widgets is helpful for understanding this code: http://wtforms.simplecodes.com/docs/0.6.2/widgets.html#custom-widgets
"""
__all__ = ['DateTimeField', 'TimeField', 'Select2Field', 'Select2TagsField']
class DateTimeField(fields.DateTimeField):
"""
Allows modifying the datetime format of a DateTimeField using form_args.
"""
widget = admin_widgets.DateTimePickerWidget()
def __init__(self, label=None, validators=None, format=None, **kwargs):
"""
Constructor
:param label:
Label
:param validators:
Field validators
:param format:
Format for text to date conversion. Defaults to '%Y-%m-%d %H:%M:%S'
:param kwargs:
Any additional parameters
"""
super(DateTimeField, self).__init__(label, validators, **kwargs)
self.format = format or '%Y-%m-%d %H:%M:%S'
class TimeField(fields.Field):
"""
A text field which stores a `datetime.time` object.
Accepts time string in multiple formats: 20:10, 20:10:00, 10:00 am, 9:30pm, etc.
"""
widget = admin_widgets.TimePickerWidget()
def __init__(self, label=None, validators=None, formats=None,
default_format=None, widget_format=None, **kwargs):
"""
Constructor
:param label:
Label
:param validators:
Field validators
:param formats:
            Supported time formats, as an enumerable.
:param default_format:
Default time format. Defaults to '%H:%M:%S'
:param kwargs:
Any additional parameters
"""
super(TimeField, self).__init__(label, validators, **kwargs)
self.formats = formats or ('%H:%M:%S', '%H:%M',
'%I:%M:%S%p', '%I:%M%p',
'%I:%M:%S %p', '%I:%M %p')
self.default_format = default_format or '%H:%M:%S'
def _value(self):
if self.raw_data:
return u' '.join(self.raw_data)
elif self.data is not None:
return self.data.strftime(self.default_format)
else:
return u''
def process_formdata(self, valuelist):
if valuelist:
date_str = u' '.join(valuelist)
if date_str.strip():
for format in self.formats:
try:
timetuple = time.strptime(date_str, format)
self.data = datetime.time(timetuple.tm_hour,
timetuple.tm_min,
timetuple.tm_sec)
return
except ValueError:
pass
raise ValueError(gettext('Invalid time format'))
else:
self.data = None
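# Illustrative sketch (not part of the original module): the parsing loop in
# TimeField.process_formdata above just tries each supported strptime format
# in turn. The standalone helper below demonstrates the same idea without any
# Flask-Admin or WTForms machinery.
def _parse_time_example(value,
                        formats=('%H:%M:%S', '%H:%M',
                                 '%I:%M:%S%p', '%I:%M%p',
                                 '%I:%M:%S %p', '%I:%M %p')):
    """Return a datetime.time for the first matching format, else None."""
    for fmt in formats:
        try:
            parsed = time.strptime(value, fmt)
            return datetime.time(parsed.tm_hour, parsed.tm_min, parsed.tm_sec)
        except ValueError:
            continue
    return None
# _parse_time_example('9:30pm') -> datetime.time(21, 30)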
class Select2Field(fields.SelectField):
"""
`Select2 <https://github.com/ivaynberg/select2>`_ styled select widget.
You must include select2.js, form-x.x.x.js and select2 stylesheet for it to
work.
"""
widget = admin_widgets.Select2Widget()
def __init__(self, label=None, validators=None, coerce=text_type,
choices=None, allow_blank=False, blank_text=None, **kwargs):
super(Select2Field, self).__init__(
label, validators, coerce, choices, **kwargs
)
self.allow_blank = allow_blank
self.blank_text = blank_text or ' '
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for value, label in self.choices:
yield (value, label, self.coerce(value) == self.data)
def process_data(self, value):
if value is None:
self.data = None
else:
try:
self.data = self.coerce(value)
except (ValueError, TypeError):
self.data = None
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
try:
self.data = self.coerce(valuelist[0])
except ValueError:
raise ValueError(self.gettext(u'Invalid Choice: could not coerce'))
def pre_validate(self, form):
if self.allow_blank and self.data is None:
return
super(Select2Field, self).pre_validate(form)
class Select2TagsField(fields.StringField):
"""`Select2 <http://ivaynberg.github.com/select2/#tags>`_ styled text field.
You must include select2.js, form-x.x.x.js and select2 stylesheet for it to work.
"""
widget = admin_widgets.Select2TagsWidget()
def __init__(self, label=None, validators=None, save_as_list=False, coerce=text_type, **kwargs):
"""Initialization
:param save_as_list:
If `True` then populate ``obj`` using list else string
"""
self.save_as_list = save_as_list
self.coerce = coerce
super(Select2TagsField, self).__init__(label, validators, **kwargs)
def process_formdata(self, valuelist):
if self.save_as_list:
self.data = [self.coerce(v.strip()) for v in valuelist[0].split(',') if v.strip()]
else:
self.data = self.coerce(valuelist[0])
def _value(self):
if isinstance(self.data, (list, tuple)):
return u','.join(as_unicode(v) for v in self.data)
elif self.data:
return as_unicode(self.data)
else:
return u''
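# Illustrative sketch (not part of the original module): with save_as_list=True,
# Select2TagsField round-trips a comma-separated tag string to a list and back,
# which the two standalone helpers below reproduce.
def _tags_to_list_example(raw):
    """Split 'a, b, c' into ['a', 'b', 'c'], dropping empty entries."""
    return [v.strip() for v in raw.split(',') if v.strip()]
def _tags_to_string_example(tags):
    """Join a list of tags back into the comma-separated form value."""
    return u','.join(tags)
# _tags_to_list_example('red, green, ,blue') -> ['red', 'green', 'blue']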
| 33.189944
| 154
| 0.565393
|
5919ff0cd55867e7bc034cdcab7d04cc95697592
| 39,381
|
py
|
Python
|
RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_tools.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 1
|
2018-07-25T03:57:34.000Z
|
2018-07-25T03:57:34.000Z
|
RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_tools.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 7
|
2016-07-17T02:34:54.000Z
|
2019-08-13T07:58:37.000Z
|
RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_tools.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 2
|
2019-09-27T08:33:22.000Z
|
2019-11-14T10:52:30.000Z
|
import FWCore.ParameterSet.Config as cms
# Barrel/endcap division in eta
ebCutOff = 1.479
# ===============================================
# Define containers used by cut definitions
# ===============================================
class IsolationCutInputs_V2:
"""
A container class that holds the name of the file with the effective
area constants for pile-up corrections
"""
def __init__(self,
isoEffAreas
):
self.isoEffAreas = isoEffAreas
class EleWorkingPoint_V1:
"""
This is a container class to hold numerical cut values for either
the barrel or endcap set of cuts for electron cut-based ID
"""
def __init__(self,
idName,
dEtaInCut,
dPhiInCut,
full5x5_sigmaIEtaIEtaCut,
hOverECut,
dxyCut,
dzCut,
absEInverseMinusPInverseCut,
relCombIsolationWithDBetaLowPtCut,
relCombIsolationWithDBetaHighPtCut,
# conversion veto cut needs no parameters, so not mentioned
missingHitsCut
):
self.idName = idName
self.dEtaInCut = dEtaInCut
self.dPhiInCut = dPhiInCut
self.full5x5_sigmaIEtaIEtaCut = full5x5_sigmaIEtaIEtaCut
self.hOverECut = hOverECut
self.dxyCut = dxyCut
self.dzCut = dzCut
self.absEInverseMinusPInverseCut = absEInverseMinusPInverseCut
self.relCombIsolationWithDBetaLowPtCut = relCombIsolationWithDBetaLowPtCut
self.relCombIsolationWithDBetaHighPtCut = relCombIsolationWithDBetaHighPtCut
# conversion veto cut needs no parameters, so not mentioned
self.missingHitsCut = missingHitsCut
class EleWorkingPoint_V2:
"""
This is a container class to hold numerical cut values for either
the barrel or endcap set of cuts for electron cut-based ID
"""
def __init__(self,
idName,
dEtaInCut,
dPhiInCut,
full5x5_sigmaIEtaIEtaCut,
hOverECut,
dxyCut,
dzCut,
absEInverseMinusPInverseCut,
relCombIsolationWithEALowPtCut,
relCombIsolationWithEAHighPtCut,
# conversion veto cut needs no parameters, so not mentioned
missingHitsCut
):
self.idName = idName
self.dEtaInCut = dEtaInCut
self.dPhiInCut = dPhiInCut
self.full5x5_sigmaIEtaIEtaCut = full5x5_sigmaIEtaIEtaCut
self.hOverECut = hOverECut
self.dxyCut = dxyCut
self.dzCut = dzCut
self.absEInverseMinusPInverseCut = absEInverseMinusPInverseCut
self.relCombIsolationWithEALowPtCut = relCombIsolationWithEALowPtCut
self.relCombIsolationWithEAHighPtCut = relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
self.missingHitsCut = missingHitsCut
class EleWorkingPoint_V3:
"""
This is a container class to hold numerical cut values for either
the barrel or endcap set of cuts for electron cut-based ID
    With respect to V2, the impact parameter cuts on dxy and dz are removed.
"""
def __init__(self,
idName,
dEtaInSeedCut,
dPhiInCut,
full5x5_sigmaIEtaIEtaCut,
hOverECut,
absEInverseMinusPInverseCut,
relCombIsolationWithEALowPtCut,
relCombIsolationWithEAHighPtCut,
# conversion veto cut needs no parameters, so not mentioned
missingHitsCut
):
self.idName = idName
self.dEtaInSeedCut = dEtaInSeedCut
self.dPhiInCut = dPhiInCut
self.full5x5_sigmaIEtaIEtaCut = full5x5_sigmaIEtaIEtaCut
self.hOverECut = hOverECut
self.absEInverseMinusPInverseCut = absEInverseMinusPInverseCut
self.relCombIsolationWithEALowPtCut = relCombIsolationWithEALowPtCut
self.relCombIsolationWithEAHighPtCut = relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
self.missingHitsCut = missingHitsCut
class EleWorkingPoint_V4:
"""
This is a container class to hold numerical cut values for either
the barrel or endcap set of cuts for electron cut-based ID
With respect to V3, the hOverE cut is made energy and pileup dependent as presented in
https://indico.cern.ch/event/662749/contributions/2763092/attachments/1545209/2425054/talk_electron_ID_2017.pdf
"""
def __init__(self,
idName,
dEtaInSeedCut,
dPhiInCut,
full5x5_sigmaIEtaIEtaCut,
hOverECut_C0,
hOverECut_CE,
hOverECut_Cr,
absEInverseMinusPInverseCut,
relCombIsolationWithEALowPtCut,
relCombIsolationWithEAHighPtCut,
# conversion veto cut needs no parameters, so not mentioned
missingHitsCut
):
self.idName = idName
self.dEtaInSeedCut = dEtaInSeedCut
self.dPhiInCut = dPhiInCut
self.full5x5_sigmaIEtaIEtaCut = full5x5_sigmaIEtaIEtaCut
self.hOverECut_C0 = hOverECut_C0
self.hOverECut_CE = hOverECut_CE
self.hOverECut_Cr = hOverECut_Cr
self.absEInverseMinusPInverseCut = absEInverseMinusPInverseCut
self.relCombIsolationWithEALowPtCut = relCombIsolationWithEALowPtCut
self.relCombIsolationWithEAHighPtCut = relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
self.missingHitsCut = missingHitsCut
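# Illustrative sketch (not part of the original file): with the V4
# parametrisation the H/E threshold is commonly evaluated as
#     H/E < C0 + CE/E_SC + Cr*rho/E_SC
# with E_SC the supercluster energy and rho the pileup density. The exact
# formula lives in the GsfEleHadronicOverEMEnergyScaledCut plugin, so treat
# the helper below as an assumption written out for illustration only.
def hOverEScaledThreshold_example(c0, cE, cRho, scEnergy, rho):
    return c0 + cE / scEnergy + cRho * rho / scEnergy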
class EleWorkingPoint_V5:
"""
This is a container class to hold numerical cut values for either
the barrel or endcap set of cuts for electron cut-based ID
With respect to V4, the isolation cut is made pt dependent as presented in the following meeting: https://indico.cern.ch/event/697079/
"""
def __init__(self,
idName,
dEtaInSeedCut,
dPhiInCut,
full5x5_sigmaIEtaIEtaCut,
hOverECut_C0,
hOverECut_CE,
hOverECut_Cr,
absEInverseMinusPInverseCut,
relCombIsolationWithEACut_C0,
relCombIsolationWithEACut_Cpt,
# conversion veto cut needs no parameters, so not mentioned
missingHitsCut
):
self.idName = idName
self.dEtaInSeedCut = dEtaInSeedCut
self.dPhiInCut = dPhiInCut
self.full5x5_sigmaIEtaIEtaCut = full5x5_sigmaIEtaIEtaCut
self.hOverECut_C0 = hOverECut_C0
self.hOverECut_CE = hOverECut_CE
self.hOverECut_Cr = hOverECut_Cr
self.absEInverseMinusPInverseCut = absEInverseMinusPInverseCut
self.relCombIsolationWithEACut_C0 = relCombIsolationWithEACut_C0
self.relCombIsolationWithEACut_Cpt = relCombIsolationWithEACut_Cpt
# conversion veto cut needs no parameters, so not mentioned
self.missingHitsCut = missingHitsCut
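# Illustrative sketch (not part of the original file): the pt-dependent
# isolation of V5 is typically applied as a threshold of the form
#     relIso < C0 + Cpt/pT
# The authoritative implementation is the GsfEleRelPFIsoScaledCut plugin;
# the helper below is only an assumption written out for clarity.
def relPFIsoScaledThreshold_example(c0, cPt, pt):
    return c0 + cPt / pt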
class EleHLTSelection_V1:
"""
This is a container class to hold numerical cut values for either
the barrel or endcap set of cuts for electron cut-based HLT-safe preselection
"""
def __init__(self,
idName,
full5x5_sigmaIEtaIEtaCut,
dEtaInSeedCut,
dPhiInCut,
hOverECut,
absEInverseMinusPInverseCut,
# isolations
ecalPFClusterIsoLowPtCut,
ecalPFClusterIsoHighPtCut,
hcalPFClusterIsoLowPtCut,
hcalPFClusterIsoHighPtCut,
trkIsoSlopeTerm,
trkIsoSlopeStart,
trkIsoConstTerm,
#
normalizedGsfChi2Cut
):
self.idName = idName
self.full5x5_sigmaIEtaIEtaCut = full5x5_sigmaIEtaIEtaCut
self.dEtaInSeedCut = dEtaInSeedCut
self.dPhiInCut = dPhiInCut
self.hOverECut = hOverECut
self.absEInverseMinusPInverseCut = absEInverseMinusPInverseCut
self.ecalPFClusterIsoLowPtCut = ecalPFClusterIsoLowPtCut
self.ecalPFClusterIsoHighPtCut = ecalPFClusterIsoHighPtCut
self.hcalPFClusterIsoLowPtCut = hcalPFClusterIsoLowPtCut
self.hcalPFClusterIsoHighPtCut = hcalPFClusterIsoHighPtCut
self.trkIsoSlopeTerm = trkIsoSlopeTerm
self.trkIsoSlopeStart = trkIsoSlopeStart
self.trkIsoConstTerm = trkIsoConstTerm
#
self.normalizedGsfChi2Cut = normalizedGsfChi2Cut
# ==============================================================
# Define individual cut configurations used by complete cut sets
# ==============================================================
# The mininum pt cut is set to 5 GeV
def psetMinPtCut():
return cms.PSet(
cutName = cms.string("MinPtCut"),
minPt = cms.double(5.0),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)
)
# Take all particles in the eta ranges 0-ebCutOff and ebCutOff-2.5
def psetPhoSCEtaMultiRangeCut():
return cms.PSet(
cutName = cms.string("GsfEleSCEtaMultiRangeCut"),
useAbsEta = cms.bool(True),
allowedEtaRanges = cms.VPSet(
cms.PSet( minEta = cms.double(0.0),
maxEta = cms.double(ebCutOff) ),
cms.PSet( minEta = cms.double(ebCutOff),
maxEta = cms.double(2.5) )
),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)
)
# Configure the cut on full5x5 sigmaIEtaIEta
def psetPhoFull5x5SigmaIEtaIEtaCut(wpEB, wpEE):
return cms.PSet(
cutName = cms.string('GsfEleFull5x5SigmaIEtaIEtaCut'),
full5x5SigmaIEtaIEtaCutValueEB = cms.double( wpEB.full5x5_sigmaIEtaIEtaCut ),
full5x5SigmaIEtaIEtaCutValueEE = cms.double( wpEE.full5x5_sigmaIEtaIEtaCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)
)
# Configure the cut on dEta seed
def psetDEtaInSeedCut(wpEB, wpEE):
return cms.PSet(
cutName = cms.string('GsfEleDEtaInSeedCut'),
dEtaInSeedCutValueEB = cms.double( wpEB.dEtaInSeedCut ),
dEtaInSeedCutValueEE = cms.double( wpEE.dEtaInSeedCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)
)
# Configure dPhiIn cut
def psetDPhiInCut(wpEB, wpEE):
return cms.PSet(
cutName = cms.string('GsfEleDPhiInCut'),
dPhiInCutValueEB = cms.double( wpEB.dPhiInCut ),
dPhiInCutValueEE = cms.double( wpEE.dPhiInCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)
)
# Configure H/E cut
def psetHadronicOverEMCut(wpEB, wpEE):
return cms.PSet(
cutName = cms.string('GsfEleHadronicOverEMCut'),
hadronicOverEMCutValueEB = cms.double( wpEB.hOverECut ),
hadronicOverEMCutValueEE = cms.double( wpEE.hOverECut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)
)
# Configure energy and pileup dependent H/E cut
def psetHadronicOverEMEnergyScaledCut(wpEB, wpEE):
return cms.PSet(
cutName = cms.string('GsfEleHadronicOverEMEnergyScaledCut'),
barrelC0 = cms.double( wpEB.hOverECut_C0 ),
barrelCE = cms.double( wpEB.hOverECut_CE ),
barrelCr = cms.double( wpEB.hOverECut_Cr ),
endcapC0 = cms.double( wpEE.hOverECut_C0 ),
endcapCE = cms.double( wpEE.hOverECut_CE ),
endcapCr = cms.double( wpEE.hOverECut_Cr ),
rho = cms.InputTag("fixedGridRhoFastjetAll"),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)
)
# Configure |1/E-1/p| cut
def psetEInerseMinusPInverseCut(wpEB, wpEE):
return cms.PSet(
cutName = cms.string('GsfEleEInverseMinusPInverseCut'),
eInverseMinusPInverseCutValueEB = cms.double( wpEB.absEInverseMinusPInverseCut ),
eInverseMinusPInverseCutValueEE = cms.double( wpEE.absEInverseMinusPInverseCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)
)
# Configure ECAL PF Cluster isolation cut. Note that this cut requires
# effective area constants file as input
def psetEcalPFClusterIsoCut(wpEB, wpEE, ecalIsoInputs):
return cms.PSet(
cutName = cms.string('GsfEleCalPFClusterIsoCut'),
isoType = cms.int32( 0 ), # ECAL = 0, HCAL = 1, see cut class header for IsoType enum
isoCutEBLowPt = cms.double( wpEB.ecalPFClusterIsoLowPtCut ),
isoCutEBHighPt = cms.double( wpEB.ecalPFClusterIsoHighPtCut ),
isoCutEELowPt = cms.double( wpEE.ecalPFClusterIsoLowPtCut ),
isoCutEEHighPt = cms.double( wpEE.ecalPFClusterIsoHighPtCut ),
isRelativeIso = cms.bool(True),
ptCutOff = cms.double(20.0), # high pT above this value, low pT below
barrelCutOff = cms.double(ebCutOff),
rho = cms.InputTag("fixedGridRhoFastjetCentralCalo"), # This rho is best for emulation
# while HLT uses ...AllCalo
effAreasConfigFile = cms.FileInPath( ecalIsoInputs.isoEffAreas ),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)
)
# Configure HCAL PF Cluster isolation cut. Note that this cut requires
# effective area constants file as input
def psetHcalPFClusterIsoCut(wpEB, wpEE, hcalIsoInputs):
return cms.PSet(
cutName = cms.string('GsfEleCalPFClusterIsoCut'),
isoType = cms.int32( 1 ), # ECAL = 0, HCAL = 1, see cut class header for IsoType enum
isoCutEBLowPt = cms.double( wpEB.hcalPFClusterIsoLowPtCut ),
isoCutEBHighPt = cms.double( wpEB.hcalPFClusterIsoHighPtCut ),
isoCutEELowPt = cms.double( wpEE.hcalPFClusterIsoLowPtCut ),
isoCutEEHighPt = cms.double( wpEE.hcalPFClusterIsoHighPtCut ),
isRelativeIso = cms.bool(True),
ptCutOff = cms.double(20.0), # high pT above this value, low pT below
barrelCutOff = cms.double(ebCutOff),
rho = cms.InputTag("fixedGridRhoFastjetCentralCalo"), # This rho is best for emulation
# while HLT uses ...AllCalo
effAreasConfigFile = cms.FileInPath( hcalIsoInputs.isoEffAreas ),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)
)
# Configure tracker isolation cut
def psetTrkPtIsoCut(wpEB, wpEE):
return cms.PSet(
cutName = cms.string('GsfEleTrkPtIsoCut'),
# Three constants for the GsfEleTrkPtIsoCut
# cut = constTerm if Et < slopeStart
# cut = slopeTerm * (Et - slopeStart) + constTerm if Et >= slopeStart
slopeTermEB = cms.double( wpEB.trkIsoSlopeTerm ),
slopeTermEE = cms.double( wpEE.trkIsoSlopeTerm ),
slopeStartEB = cms.double( wpEB.trkIsoSlopeStart ),
slopeStartEE = cms.double( wpEE.trkIsoSlopeStart ),
constTermEB = cms.double( wpEB.trkIsoConstTerm ),
constTermEE = cms.double( wpEE.trkIsoConstTerm ),
useHEEPIso = cms.bool(False),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)
)
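# Illustrative sketch (not part of the original file): the piecewise tracker
# isolation threshold described in the comments of psetTrkPtIsoCut above,
# written out as a plain Python function.
def trkPtIsoThreshold_example(et, slopeTerm, slopeStart, constTerm):
    if et < slopeStart:
        return constTerm
    return slopeTerm * (et - slopeStart) + constTerm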
# Configure GsfTrack chi2/NDOF cut
def psetNormalizedGsfChi2Cut(wpEB, wpEE):
return cms.PSet(
cutName = cms.string('GsfEleNormalizedGsfChi2Cut'),
normalizedGsfChi2CutValueEB = cms.double( wpEB.normalizedGsfChi2Cut ),
normalizedGsfChi2CutValueEE = cms.double( wpEE.normalizedGsfChi2Cut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)
)
def psetEffAreaPFIsoCut(wpEB, wpEE, isoInputs):
return cms.PSet(
cutName = cms.string('GsfEleEffAreaPFIsoCut'),
isoCutEBLowPt = cms.double( wpEB.relCombIsolationWithEALowPtCut ),
isoCutEBHighPt = cms.double( wpEB.relCombIsolationWithEAHighPtCut ),
isoCutEELowPt = cms.double( wpEE.relCombIsolationWithEALowPtCut ),
isoCutEEHighPt = cms.double( wpEE.relCombIsolationWithEAHighPtCut ),
isRelativeIso = cms.bool(True),
ptCutOff = cms.double(20.0), # high pT above this value, low pT below
barrelCutOff = cms.double(ebCutOff),
rho = cms.InputTag("fixedGridRhoFastjetAll"),
effAreasConfigFile = cms.FileInPath( isoInputs.isoEffAreas ),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)
)
def psetRelPFIsoScaledCut(wpEB, wpEE, isoInputs):
return cms.PSet(
cutName = cms.string('GsfEleRelPFIsoScaledCut'),
barrelC0 = cms.double(wpEB.relCombIsolationWithEACut_C0),
endcapC0 = cms.double(wpEE.relCombIsolationWithEACut_C0),
barrelCpt = cms.double(wpEB.relCombIsolationWithEACut_Cpt),
endcapCpt = cms.double(wpEE.relCombIsolationWithEACut_Cpt),
barrelCutOff = cms.double(ebCutOff),
rho = cms.InputTag("fixedGridRhoFastjetAll"),
effAreasConfigFile = cms.FileInPath( isoInputs.isoEffAreas ),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)
)
def psetConversionVetoCut():
return cms.PSet(
cutName = cms.string('GsfEleConversionVetoCut'),
conversionSrc = cms.InputTag('allConversions'),
conversionSrcMiniAOD = cms.InputTag('reducedEgamma:reducedConversions'),
beamspotSrc = cms.InputTag('offlineBeamSpot'),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)
)
def psetMissingHitsCut(wpEB, wpEE):
return cms.PSet(
cutName = cms.string('GsfEleMissingHitsCut'),
maxMissingHitsEB = cms.uint32( wpEB.missingHitsCut ),
maxMissingHitsEE = cms.uint32( wpEE.missingHitsCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)
)
# -----------------------------
# Version V1 common definitions
# -----------------------------
# This cut set definition is in the old style, with everything configured
# in one go. It is kept to minimize changes. New definitions should use
# PSets defined above instead.
def configureVIDCutBasedEleID_V1( wpEB, wpEE ):
"""
This function configures the full cms.PSet for a VID ID and returns it.
The inputs: two objects of the type WorkingPoint_V1, one
containing the cuts for the Barrel (EB) and the other one for the Endcap (EE).
"""
# print "VID: Configuring cut set %s" % wpEB.idName
parameterSet = cms.PSet(
#
idName = cms.string( wpEB.idName ), # same name stored in the _EB and _EE objects
cutFlow = cms.VPSet(
cms.PSet( cutName = cms.string("MinPtCut"),
minPt = cms.double(5.0),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False) ),
cms.PSet( cutName = cms.string("GsfEleSCEtaMultiRangeCut"),
useAbsEta = cms.bool(True),
allowedEtaRanges = cms.VPSet(
cms.PSet( minEta = cms.double(0.0),
maxEta = cms.double(ebCutOff) ),
cms.PSet( minEta = cms.double(ebCutOff),
maxEta = cms.double(2.5) )
),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleDEtaInCut'),
dEtaInCutValueEB = cms.double( wpEB.dEtaInCut ),
dEtaInCutValueEE = cms.double( wpEE.dEtaInCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleDPhiInCut'),
dPhiInCutValueEB = cms.double( wpEB.dPhiInCut ),
dPhiInCutValueEE = cms.double( wpEE.dPhiInCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleFull5x5SigmaIEtaIEtaCut'),
full5x5SigmaIEtaIEtaCutValueEB = cms.double( wpEB.full5x5_sigmaIEtaIEtaCut ),
full5x5SigmaIEtaIEtaCutValueEE = cms.double( wpEE.full5x5_sigmaIEtaIEtaCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleHadronicOverEMCut'),
hadronicOverEMCutValueEB = cms.double( wpEB.hOverECut ),
hadronicOverEMCutValueEE = cms.double( wpEE.hOverECut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleDxyCut'),
dxyCutValueEB = cms.double( wpEB.dxyCut ),
dxyCutValueEE = cms.double( wpEE.dxyCut ),
vertexSrc = cms.InputTag("offlinePrimaryVertices"),
vertexSrcMiniAOD = cms.InputTag("offlineSlimmedPrimaryVertices"),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleDzCut'),
dzCutValueEB = cms.double( wpEB.dzCut ),
dzCutValueEE = cms.double( wpEE.dzCut ),
vertexSrc = cms.InputTag("offlinePrimaryVertices"),
vertexSrcMiniAOD = cms.InputTag("offlineSlimmedPrimaryVertices"),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleEInverseMinusPInverseCut'),
eInverseMinusPInverseCutValueEB = cms.double( wpEB.absEInverseMinusPInverseCut ),
eInverseMinusPInverseCutValueEE = cms.double( wpEE.absEInverseMinusPInverseCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleDeltaBetaIsoCutStandalone'),
isoCutEBLowPt = cms.double( wpEB.relCombIsolationWithDBetaLowPtCut ),
isoCutEBHighPt = cms.double( wpEB.relCombIsolationWithDBetaHighPtCut ),
isoCutEELowPt = cms.double( wpEE.relCombIsolationWithDBetaLowPtCut ),
isoCutEEHighPt = cms.double( wpEE.relCombIsolationWithDBetaHighPtCut ),
isRelativeIso = cms.bool(True),
deltaBetaConstant = cms.double(0.5),
ptCutOff = cms.double(20.0), # high pT above this value, low pT below
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleConversionVetoCut'),
conversionSrc = cms.InputTag('allConversions'),
conversionSrcMiniAOD = cms.InputTag('reducedEgamma:reducedConversions'),
beamspotSrc = cms.InputTag('offlineBeamSpot'),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleMissingHitsCut'),
maxMissingHitsEB = cms.uint32( wpEB.missingHitsCut ),
maxMissingHitsEE = cms.uint32( wpEE.missingHitsCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False) ),
)
)
#
return parameterSet
# -----------------------------
# Version V2 common definitions
# -----------------------------
# This cut set definition is in the old style, with everything configured
# in one go. It is kept to minimize changes. New definitions should use
# PSets defined above instead.
def configureVIDCutBasedEleID_V2( wpEB, wpEE, isoInputs ):
"""
This function configures the full cms.PSet for a VID ID and returns it.
The inputs: two objects of the type WorkingPoint_V2, one
containing the cuts for the Barrel (EB) and the other one for the Endcap (EE).
The third argument is an object that contains information necessary
for isolation calculations.
"""
# print "VID: Configuring cut set %s" % wpEB.idName
parameterSet = cms.PSet(
#
idName = cms.string( wpEB.idName ), # same name stored in the _EB and _EE objects
cutFlow = cms.VPSet(
cms.PSet( cutName = cms.string("MinPtCut"),
minPt = cms.double(5.0),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False) ),
cms.PSet( cutName = cms.string("GsfEleSCEtaMultiRangeCut"),
useAbsEta = cms.bool(True),
allowedEtaRanges = cms.VPSet(
cms.PSet( minEta = cms.double(0.0),
maxEta = cms.double(ebCutOff) ),
cms.PSet( minEta = cms.double(ebCutOff),
maxEta = cms.double(2.5) )
),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleDEtaInCut'),
dEtaInCutValueEB = cms.double( wpEB.dEtaInCut ),
dEtaInCutValueEE = cms.double( wpEE.dEtaInCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleDPhiInCut'),
dPhiInCutValueEB = cms.double( wpEB.dPhiInCut ),
dPhiInCutValueEE = cms.double( wpEE.dPhiInCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleFull5x5SigmaIEtaIEtaCut'),
full5x5SigmaIEtaIEtaCutValueEB = cms.double( wpEB.full5x5_sigmaIEtaIEtaCut ),
full5x5SigmaIEtaIEtaCutValueEE = cms.double( wpEE.full5x5_sigmaIEtaIEtaCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleHadronicOverEMCut'),
hadronicOverEMCutValueEB = cms.double( wpEB.hOverECut ),
hadronicOverEMCutValueEE = cms.double( wpEE.hOverECut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleDxyCut'),
dxyCutValueEB = cms.double( wpEB.dxyCut ),
dxyCutValueEE = cms.double( wpEE.dxyCut ),
vertexSrc = cms.InputTag("offlinePrimaryVertices"),
vertexSrcMiniAOD = cms.InputTag("offlineSlimmedPrimaryVertices"),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleDzCut'),
dzCutValueEB = cms.double( wpEB.dzCut ),
dzCutValueEE = cms.double( wpEE.dzCut ),
vertexSrc = cms.InputTag("offlinePrimaryVertices"),
vertexSrcMiniAOD = cms.InputTag("offlineSlimmedPrimaryVertices"),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleEInverseMinusPInverseCut'),
eInverseMinusPInverseCutValueEB = cms.double( wpEB.absEInverseMinusPInverseCut ),
eInverseMinusPInverseCutValueEE = cms.double( wpEE.absEInverseMinusPInverseCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleEffAreaPFIsoCut'),
isoCutEBLowPt = cms.double( wpEB.relCombIsolationWithEALowPtCut ),
isoCutEBHighPt = cms.double( wpEB.relCombIsolationWithEAHighPtCut ),
isoCutEELowPt = cms.double( wpEE.relCombIsolationWithEALowPtCut ),
isoCutEEHighPt = cms.double( wpEE.relCombIsolationWithEAHighPtCut ),
isRelativeIso = cms.bool(True),
ptCutOff = cms.double(20.0), # high pT above this value, low pT below
barrelCutOff = cms.double(ebCutOff),
rho = cms.InputTag("fixedGridRhoFastjetAll"),
effAreasConfigFile = cms.FileInPath( isoInputs.isoEffAreas ),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False) ),
cms.PSet( cutName = cms.string('GsfEleConversionVetoCut'),
conversionSrc = cms.InputTag('allConversions'),
conversionSrcMiniAOD = cms.InputTag('reducedEgamma:reducedConversions'),
beamspotSrc = cms.InputTag('offlineBeamSpot'),
needsAdditionalProducts = cms.bool(True),
isIgnored = cms.bool(False)),
cms.PSet( cutName = cms.string('GsfEleMissingHitsCut'),
maxMissingHitsEB = cms.uint32( wpEB.missingHitsCut ),
maxMissingHitsEE = cms.uint32( wpEE.missingHitsCut ),
barrelCutOff = cms.double(ebCutOff),
needsAdditionalProducts = cms.bool(False),
isIgnored = cms.bool(False) ),
)
)
#
return parameterSet
# ==============================================================
# Define the complete cut sets
# ==============================================================
def configureVIDCutBasedEleID_V3( wpEB, wpEE, isoInputs ):
"""
This function configures the full cms.PSet for a VID ID and returns it.
The inputs: two objects of the type WorkingPoint_V3, one
containing the cuts for the Barrel (EB) and the other one for the Endcap (EE).
The third argument is an object that contains information necessary
for isolation calculations.
In this version, the impact parameter cuts dxy and dz are not present
"""
# print "VID: Configuring cut set %s" % wpEB.idName
parameterSet = cms.PSet(
#
idName = cms.string( wpEB.idName ), # same name stored in the _EB and _EE objects
cutFlow = cms.VPSet(
psetMinPtCut(),
psetPhoSCEtaMultiRangeCut(), # eta cut
psetDEtaInSeedCut(wpEB, wpEE), # dEtaIn seed cut
psetDPhiInCut(wpEB, wpEE), # dPhiIn cut
psetPhoFull5x5SigmaIEtaIEtaCut(wpEB, wpEE), # full 5x5 sigmaIEtaIEta cut
psetHadronicOverEMCut(wpEB, wpEE), # H/E cut
psetEInerseMinusPInverseCut(wpEB, wpEE), # |1/e-1/p| cut
psetEffAreaPFIsoCut(wpEB, wpEE, isoInputs), # rel. comb. PF isolation cut
psetConversionVetoCut(),
psetMissingHitsCut(wpEB, wpEE)
)
)
#
return parameterSet
def configureVIDCutBasedEleID_V4( wpEB, wpEE, isoInputs ):
"""
This function configures the full cms.PSet for a VID ID and returns it.
    The inputs: two objects of the type EleWorkingPoint_V4, one
containing the cuts for the Barrel (EB) and the other one for the Endcap (EE).
The third argument is an object that contains information necessary
for isolation calculations.
In this version, the energy and pileup dependent hOverE is introduced
"""
# print "VID: Configuring cut set %s" % wpEB.idName
parameterSet = cms.PSet(
#
idName = cms.string( wpEB.idName ), # same name stored in the _EB and _EE objects
cutFlow = cms.VPSet(
psetMinPtCut(),
psetPhoSCEtaMultiRangeCut(), # eta cut
psetDEtaInSeedCut(wpEB, wpEE), # dEtaIn seed cut
psetDPhiInCut(wpEB, wpEE), # dPhiIn cut
psetPhoFull5x5SigmaIEtaIEtaCut(wpEB, wpEE), # full 5x5 sigmaIEtaIEta cut
psetHadronicOverEMEnergyScaledCut(wpEB, wpEE), # H/E cut
psetEInerseMinusPInverseCut(wpEB, wpEE), # |1/e-1/p| cut
psetEffAreaPFIsoCut(wpEB, wpEE, isoInputs), # rel. comb. PF isolation cut
psetConversionVetoCut(),
psetMissingHitsCut(wpEB, wpEE)
)
)
#
return parameterSet
def configureVIDCutBasedEleID_V5( wpEB, wpEE, isoInputs ):
"""
This function configures the full cms.PSet for a VID ID and returns it.
    The inputs: two objects of the type EleWorkingPoint_V5, one
containing the cuts for the Barrel (EB) and the other one for the Endcap (EE).
The third argument is an object that contains information necessary
for isolation calculations.
In this version, the pt dependent isolation is introduced
"""
# print "VID: Configuring cut set %s" % wpEB.idName
parameterSet = cms.PSet(
#
idName = cms.string( wpEB.idName ), # same name stored in the _EB and _EE objects
cutFlow = cms.VPSet(
psetMinPtCut(),
psetPhoSCEtaMultiRangeCut(), # eta cut
psetDEtaInSeedCut(wpEB, wpEE), # dEtaIn seed cut
psetDPhiInCut(wpEB, wpEE), # dPhiIn cut
psetPhoFull5x5SigmaIEtaIEtaCut(wpEB, wpEE), # full 5x5 sigmaIEtaIEta cut
psetHadronicOverEMEnergyScaledCut(wpEB, wpEE), # H/E cut
psetEInerseMinusPInverseCut(wpEB, wpEE), # |1/e-1/p| cut
psetRelPFIsoScaledCut(wpEB, wpEE, isoInputs), # rel. comb. PF isolation cut
psetConversionVetoCut(),
psetMissingHitsCut(wpEB, wpEE)
)
)
#
return parameterSet
# -----------------------------
# HLT-safe common definitions
# -----------------------------
def configureVIDCutBasedEleHLTPreselection_V1( wpEB, wpEE, ecalIsoInputs, hcalIsoInputs ):
"""
This function configures the full cms.PSet for a VID ID and returns it.
The inputs: two objects of the type EleHLTSelection_V1, one
containing the cuts for the Barrel (EB) and the other one for the Endcap (EE).
The third and fourth arguments are objects that contain information necessary
for isolation calculations for ECAL and HCAL.
"""
# print "VID: Configuring cut set %s" % wpEB.idName
parameterSet = cms.PSet(
idName = cms.string( wpEB.idName ), # same name stored in the _EB and _EE objects
cutFlow = cms.VPSet(
psetMinPtCut(), # min pt cut
psetPhoSCEtaMultiRangeCut(), # eta cut
psetPhoFull5x5SigmaIEtaIEtaCut(wpEB, wpEE), # full 5x5 sigmaIEtaIEta cut
psetDEtaInSeedCut(wpEB, wpEE), # dEtaIn seed cut
psetDPhiInCut(wpEB, wpEE), # dPhiIn cut
psetHadronicOverEMCut(wpEB, wpEE), # H/E cut
psetEInerseMinusPInverseCut(wpEB, wpEE), # |1/e-1/p| cut
psetEcalPFClusterIsoCut(wpEB, wpEE, ecalIsoInputs), # ECAL PF Cluster isolation
psetHcalPFClusterIsoCut(wpEB, wpEE, hcalIsoInputs), # HCAL PF Cluster isolation
psetTrkPtIsoCut(wpEB, wpEE), # tracker isolation cut
psetNormalizedGsfChi2Cut(wpEB, wpEE) # GsfTrack chi2/NDOF cut
)
)
#
return parameterSet
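# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). A concrete ID
# definition would normally build one working point per detector region and
# pass both to the matching configure function, roughly as outlined below.
# All numbers and the effective-area file name are placeholders, not a real
# working point, and running this requires a CMSSW environment where FWCore
# and the VID cut plugins are available.
#
# wpEB_example = EleWorkingPoint_V5(
#     idName="cutBasedElectronID-example-veto",
#     dEtaInSeedCut=0.004, dPhiInCut=0.06, full5x5_sigmaIEtaIEtaCut=0.011,
#     hOverECut_C0=0.05, hOverECut_CE=1.16, hOverECut_Cr=0.03,
#     absEInverseMinusPInverseCut=0.19,
#     relCombIsolationWithEACut_C0=0.10, relCombIsolationWithEACut_Cpt=0.5,
#     missingHitsCut=2)
# wpEE_example = EleWorkingPoint_V5(...)  # same fields with endcap values
# isoInputs_example = IsolationCutInputs_V2("path/to/effAreaElectrons_example.txt")
# exampleID = configureVIDCutBasedEleID_V5(wpEB_example, wpEE_example, isoInputs_example)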
| 49.912548
| 138
| 0.578121
|
6f8a3926ef3a535b326347b99b45ba54376dff74
| 109
|
py
|
Python
|
trobafeina/views.py
|
n0ss4/trobafeina
|
4734a6459e423ddf051e98beb39072451ddb476f
|
[
"MIT"
] | null | null | null |
trobafeina/views.py
|
n0ss4/trobafeina
|
4734a6459e423ddf051e98beb39072451ddb476f
|
[
"MIT"
] | null | null | null |
trobafeina/views.py
|
n0ss4/trobafeina
|
4734a6459e423ddf051e98beb39072451ddb476f
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def index(request):
return render(request, 'JobOffer/index.html')
| 15.571429
| 49
| 0.752294
|
60bcc6eab0ef4252cd064e641268b21f839dda73
| 11,709
|
py
|
Python
|
parser.py
|
PhilippThoelke/protein-folding
|
322483c60119c963574232d5b14d898166a92a15
|
[
"MIT"
] | 8
|
2020-05-20T14:33:08.000Z
|
2022-02-16T20:09:26.000Z
|
parser.py
|
PhilippThoelke/protein-folding
|
322483c60119c963574232d5b14d898166a92a15
|
[
"MIT"
] | 1
|
2022-03-18T09:13:52.000Z
|
2022-03-18T20:31:55.000Z
|
parser.py
|
PhilippThoelke/protein-folding
|
322483c60119c963574232d5b14d898166a92a15
|
[
"MIT"
] | 2
|
2021-05-23T19:15:42.000Z
|
2022-03-18T09:26:47.000Z
|
import xml.etree.ElementTree as ElementTree
import tensorflow as tf
import numpy as np
class Force:
def get_weighting(self):
return 1
class HarmonicBondForce(Force):
def __init__(self, atom1, atom2, length, k):
self.atom1 = atom1
self.atom2 = atom2
self.length = tf.constant(length)
self.k = tf.constant(k)
def get_weighting(self):
return self.k
def __call__(self):
return HarmonicBondForce._call(self.atom1.pos, self.atom2.pos, self.length, self.k)
@tf.function
def _call(pos1, pos2, length, k):
return k * (tf.norm(pos1 - pos2) - length) ** 2
def __repr__(self):
return f'HarmonicBondForce(between {self.atom1.name} and {self.atom2.name}, length is {self.length})'
class HarmonicAngleForce(Force):
def __init__(self, atom1, atom2, atom3, angle, k):
self.atom1 = atom1
self.atom2 = atom2
self.atom3 = atom3
# use angle - pi as the actual target angle such that angle=0 is straight and angle=pi is right angle
self.angle = tf.constant(angle) - np.pi
self.angle *= np.sign(self.angle)
self.k = tf.constant(k)
def get_weighting(self):
return self.k
def __call__(self):
return HarmonicAngleForce._call(self.atom1.pos, self.atom2.pos, self.atom3.pos, self.angle, self.k)
@tf.function
def _call(pos1, pos2, pos3, angle, k):
side1 = pos1 - pos2
side2 = pos3 - pos2
cosine_angle = tf.tensordot(side1, side2, 1) / (tf.norm(side1) * tf.norm(side2))
if cosine_angle >= 1:
# acos(x) is not defined for x>1 and gradient is -inf for x=1 (returning 0 here destroys the gradient for this force)
return tf.constant(0, dtype=tf.float32)
ang = tf.math.acos(cosine_angle)
return k * (ang - angle) ** 2
def __repr__(self):
return f'HarmonicAngleForce(between {self.atom1.name}, {self.atom2.name} and {self.atom3.name}, angle is {self.angle})'
class NonBondedForce(Force):
def __init__(self, atom1, atom2):
self.atom1 = atom1
self.atom2 = atom2
self.epsilon = tf.constant(4 * tf.math.sqrt(self.atom1.epsilon * self.atom2.epsilon), dtype=tf.float32)
sigma = (self.atom1.sigma + self.atom2.sigma) / 2
self.sigma6 = tf.constant(sigma ** 6, dtype=tf.float32)
self.sigma12 = tf.constant(sigma ** 12, dtype=tf.float32)
self.charge = tf.constant(self.atom1.charge * self.atom2.charge)
def get_weighting(self):
return self.epsilon
def __call__(self):
return NonBondedForce._call(self.atom1.pos, self.atom2.pos, self.epsilon, self.sigma6, self.sigma12, self.charge)
@tf.function
def _call(pos1, pos2, epsilon, sigma6, sigma12, charge, cutoff=0.6):
# calculation of r should probably not just use the positions but also the contact radii
r_sq = tf.reduce_sum((pos2 - pos1) ** 2)
if r_sq > cutoff ** 2:
return tf.constant(0, dtype=tf.float32)
return epsilon * (sigma12 / r_sq ** 6 - sigma6 / r_sq ** 3) + charge / tf.math.sqrt(r_sq)
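# Illustrative sketch (not part of the original file): NonBondedForce._call
# above evaluates a Lennard-Jones 12-6 term plus a Coulomb term. The plain
# NumPy helper below computes the same expression for a given squared
# separation, without the TensorFlow graph or the distance cutoff.
def _nonbonded_energy_example(r_sq, epsilon, sigma6, sigma12, charge):
    return epsilon * (sigma12 / r_sq ** 6 - sigma6 / r_sq ** 3) + charge / np.sqrt(r_sq)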
class Atom:
def __init__(self, name, element, atom_class, type_id, mass, charge, sigma, epsilon, pos=None):
self.name = name
self.element = element
self.atom_class = atom_class
self.type_id = type_id
self.mass = mass
self.charge = charge
self.sigma = sigma
self.epsilon = epsilon
if pos is None:
self.pos = tf.Variable(tf.random.uniform(shape=(3,)))
elif type(pos) == float or type(pos) == int:
self.pos = tf.Variable(tf.random.uniform(minval=0 + pos, maxval=1 + pos, shape=(3,)))
else:
self.pos = tf.Variable(pos)
def __repr__(self):
return f'Atom({self.name}: element {self.element} with mass {self.mass})'
class Residue:
def __init__(self, name, forcefield='forcefields/amber99sb.xml', add_hydrogens=False, add_oxygen=False, his_replacement='HID', add_non_bonded=True, index=0):
self.index = index
# parse a mapping from single letter amino acid codes to three letter abbreviations
mappings = ('ala:A|arg:R|asn:N|asp:D|cys:C|gln:Q|glu:E|gly:G|his:H|ile:I|'
+ 'leu:L|lys:K|met:M|phe:F|pro:P|ser:S|thr:T|trp:W|tyr:Y|val:V').upper().split('|')
letter2aa = dict([m.split(':')[::-1] for m in mappings])
# figure out the 3 letter amino acid abbreviation from the name parameter
if len(name) == 1:
self.name = letter2aa[name]
else:
self.name = name
if add_hydrogens and add_oxygen:
# theoretically it's possible (I think) but the AMBER forcefield doesn't list this directly
raise ValueError('Can\'t add hydrogens and oxygen to the same residue')
# Histidine (HIS, H) is either one of HID, HIE or HIP in AMBER
if self.name == 'HIS':
self.name = his_replacement
if add_hydrogens:
self.name = 'N' + self.name
if add_oxygen:
self.name = 'C' + self.name
# load the forcefield xml and store the root element
if type(forcefield) == str:
self.forcefield = ElementTree.parse(forcefield).getroot()
elif type(forcefield) == ElementTree.ElementTree:
self.forcefield = forcefield.getroot()
elif type(forcefield) == ElementTree.Element:
self.forcefield = forcefield
else:
raise ValueError(f'Forcefield type {type(forcefield)} not supported')
self.atoms = []
self.bonds = []
self.external_bond_indices = []
        # load all atomic attributes for this residue from the forcefield and store atomic bonds
for obj in self.forcefield.find(f'Residues/Residue[@name=\'{self.name}\']'):
if obj.tag == 'Atom':
self.atoms.append(self._get_atom(obj))
elif obj.tag == 'Bond':
self.bonds.append(self._get_bond(obj))
elif obj.tag == 'ExternalBond':
self.external_bond_indices.append(self._get_external_bond(obj))
else:
                print(f'Unsupported type {obj.tag}')
# get the harmonic bond forces between atoms
self.harmonic_bond_forces = []
for bond in self.bonds:
a1 = bond[0]
a2 = bond[1]
search_options = [(a1.atom_class, a2.atom_class),
(a2.atom_class, a1.atom_class)]
for option in search_options:
force = self._get_harmonic_bond_force(*option)
if force is not None:
break
if force is not None:
self.harmonic_bond_forces.append(HarmonicBondForce(a1, a2, float(force.get('length')), float(force.get('k'))))
else:
print(f'No harmonic bond force found for {a1.name} and {a2.name}')
# get the harmonic angle forces between atoms
self.harmonic_angle_forces = []
for i, a1 in enumerate(self.atoms):
for j, a2 in enumerate(self.atoms[i+1:]):
for k, a3 in enumerate(self.atoms[i+j+2:]):
search_options = [(a1.atom_class, a2.atom_class, a3.atom_class),
(a3.atom_class, a2.atom_class, a1.atom_class)]
for option in search_options:
force = self._get_harmonic_angle_force(*option)
if force is not None:
break
if force is not None:
self.harmonic_angle_forces.append(HarmonicAngleForce(a1, a2, a3, float(force.get('angle')), float(force.get('k'))))
# get non-bonded forces for all atoms
self.nonbonded_forces = []
if add_non_bonded:
for i, a1 in enumerate(self.atoms):
for a2 in self.atoms[i+1:]:
self.nonbonded_forces.append(NonBondedForce(a1, a2))
def _get_atom(self, xml_element):
# extract the attributes of an atom from the forcefield
name = xml_element.get('name')
type_id = int(xml_element.get('type'))
atom_traits = self.forcefield[0][type_id].attrib
atom_class = atom_traits['class']
element = atom_traits['element']
mass = float(atom_traits['mass'])
nonbonded_traits = self.forcefield[5][type_id].attrib
charge = float(nonbonded_traits.get('charge'))
sigma = float(nonbonded_traits.get('sigma'))
epsilon = float(nonbonded_traits.get('epsilon'))
return Atom(name, element, atom_class, type_id, mass, charge, sigma, epsilon, pos=self.index)
def _get_bond(self, xml_element):
# extract the indices of two bonded atoms from the forcefield
attribs = xml_element.attrib
return [self.atoms[int(attribs['from'])], self.atoms[int(attribs['to'])]]
def _get_external_bond(self, xml_element):
# extract the index of an atom with an external bond from the forcefield
return int(xml_element.attrib['from'])
def _get_harmonic_bond_force(self, name1, name2):
return self.forcefield.find(f'HarmonicBondForce/Bond[@class1=\'{name1}\'][@class2=\'{name2}\']')
def _get_harmonic_angle_force(self, name1, name2, name3):
return self.forcefield.find(f'HarmonicAngleForce/Angle[@class1=\'{name1}\'][@class2=\'{name2}\'][@class3=\'{name3}\']')
def get_atom_count(self):
return len(self.atoms)
def get_bond_count(self):
return len(self.bonds)
def get_atoms(self):
return self.atoms
def get_forces(self):
return self.harmonic_bond_forces + self.harmonic_angle_forces + self.nonbonded_forces
def get_variables(self):
return [atom.pos for atom in self.atoms]
def get_energy(self, normalize=False):
forces = self.get_forces()
if normalize:
ks = sum([force.get_weighting() for force in forces])
else:
ks = 1
return sum([force() for force in forces]) / ks
def get_mass(self):
return sum([atom.mass for atom in self.atoms])
def __repr__(self):
return f'Residue({self.name}: {self.get_atom_count()} atoms, {self.get_bond_count()} bonds)'
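# Illustrative note (not part of the original file): Residue accepts either a
# single-letter code or a three-letter abbreviation, so Residue('A') and
# Residue('ALA') name the same residue. 'HIS' is replaced by the
# his_replacement variant ('HID' by default), and the first/last residues of
# a Chain get an 'N'/'C' prefix (e.g. NALA, CALA) so that the extra terminal
# hydrogens or oxygen listed in the AMBER forcefield are included.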
class Chain:
def __init__(self, sequence, forcefield='forcefields/amber99sb.xml'):
        if len(sequence) < 2:
raise ValueError('Must have at least two amino acids to form a chain')
self.sequence = sequence
self.residues = []
# generate residues from the amino acid sequence
for i, aa in enumerate(self.sequence):
self.residues.append(Residue(aa, forcefield, add_non_bonded=False, add_hydrogens=(i == 0), add_oxygen=(i == len(self.sequence) - 1), index=i))
self.external_bonds = []
# store the atoms which have external bonds, reaching from one residue to another
for i in range(1, len(self.residues)):
idx1 = self.residues[i-1].external_bond_indices[-1]
idx2 = self.residues[i].external_bond_indices[0]
self.external_bonds.append([self.residues[i-1].atoms[idx1], self.residues[i].atoms[idx2]])
self.external_harmonic_bond_forces = []
# get the harmonic bond forces between atoms with external bonds
for bond in self.external_bonds:
a1 = bond[0]
a2 = bond[1]
search_options = [(a1.atom_class, a2.atom_class),
(a2.atom_class, a1.atom_class)]
for option in search_options:
force = self.residues[0]._get_harmonic_bond_force(*option)
if force is not None:
break
if force is not None:
self.external_harmonic_bond_forces.append(HarmonicBondForce(a1, a2, float(force.get('length')), float(force.get('k'))))
# get non-bonded forces for all pairs of atoms
self.nonbonded_forces = []
atoms = self.get_atoms()
for i, a1 in enumerate(atoms):
for a2 in atoms[i+1:]:
self.nonbonded_forces.append(NonBondedForce(a1, a2))
def get_atom_count(self):
return sum([res.get_atom_count() for res in self.residues])
def get_bond_count(self):
return sum([res.get_bond_count() for res in self.residues]) + len(self.external_bonds)
def get_atoms(self):
return sum([res.atoms for res in self.residues], [])
def get_bonds(self):
return sum([res.bonds for res in self.residues], []) + self.external_bonds
def get_forces(self):
return sum([res.get_forces() for res in self.residues], []) + self.external_harmonic_bond_forces + self.nonbonded_forces
def get_energy(self, normalize=False):
forces = self.get_forces()
if normalize:
ks = sum([force.get_weighting() for force in forces])
else:
ks = 1
return sum([force() for force in forces]) / ks
def get_variables(self):
return sum([res.get_variables() for res in self.residues], [])
def get_mass(self):
return sum([res.get_mass() for res in self.residues])
def __repr__(self):
return f'Chain({len(self.residues)} residues, {self.get_atom_count()} atoms, {self.get_bond_count()} bonds)'
if __name__ == '__main__':
chain = Chain('QED')
print(chain)
print(chain.get_energy())
| 35.268072
| 158
| 0.707405
|
202fdcd1dcf5f68f9e9064aa7006f456e6131ad9
| 160
|
py
|
Python
|
virtual/bin/django-admin.py
|
BrilliantGrant/Transaction-application
|
988fdbd6ed8a1fea9ca8366eeb2b30275b727836
|
[
"MIT",
"Unlicense"
] | null | null | null |
virtual/bin/django-admin.py
|
BrilliantGrant/Transaction-application
|
988fdbd6ed8a1fea9ca8366eeb2b30275b727836
|
[
"MIT",
"Unlicense"
] | null | null | null |
virtual/bin/django-admin.py
|
BrilliantGrant/Transaction-application
|
988fdbd6ed8a1fea9ca8366eeb2b30275b727836
|
[
"MIT",
"Unlicense"
] | null | null | null |
#!/home/bri/Desktop/Transaction/virtual/bin/python3.6
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 26.666667
| 53
| 0.7875
|
dfc2cbe31675277049bb67fb563e754c40bed59c
| 8,477
|
py
|
Python
|
test/functional/test_framework/key.py
|
RPGCoin/RPG-Core
|
bf0efd6f3d4426e11d195efe5aee7b7acabd9a3d
|
[
"MIT"
] | 5
|
2019-02-19T10:27:59.000Z
|
2021-03-22T03:59:19.000Z
|
test/functional/test_framework/key.py
|
Matthelonianxl/RPG-Core
|
bf0efd6f3d4426e11d195efe5aee7b7acabd9a3d
|
[
"MIT"
] | 2
|
2019-02-19T09:46:38.000Z
|
2019-02-25T19:09:41.000Z
|
test/functional/test_framework/key.py
|
Matthelonianxl/RPG-Core
|
bf0efd6f3d4426e11d195efe5aee7b7acabd9a3d
|
[
"MIT"
] | 2
|
2019-08-03T11:43:48.000Z
|
2020-04-29T07:19:13.000Z
|
# Copyright (c) 2011 Sam Rushing
"""ECC secp256k1 OpenSSL wrapper.
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
This file is modified from python-rpglib.
"""
import ctypes
import ctypes.util
import hashlib
import sys
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey():
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
        if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
            raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ecdh_key(self, other_pubkey):
ecdh_keybuffer = ctypes.create_string_buffer(32)
r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
ssl.EC_KEY_get0_public_key(other_pubkey.k),
self.k, 0)
if r != 32:
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
def sign(self, hash, low_s = True):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
sig_size0.value = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
assert mb_sig.raw[0] == 0x30
assert mb_sig.raw[1] == sig_size0.value - 2
total_size = mb_sig.raw[1]
assert mb_sig.raw[2] == 2
r_size = mb_sig.raw[3]
assert mb_sig.raw[4 + r_size] == 2
s_size = mb_sig.raw[5 + r_size]
s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big')
if (not low_s) or s_value <= SECP256K1_ORDER_HALF:
return mb_sig.raw[:sig_size0.value]
else:
low_s_value = SECP256K1_ORDER - s_value
low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')
while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
low_s_bytes = low_s_bytes[1:]
new_s_size = len(low_s_bytes)
new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big')
new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big')
return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes
def verify(self, hash, sig):
"""Verify a DER signature"""
return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
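# Illustrative sketch (not part of the original file): the low-S branch in
# CECKey.sign above replaces any signature s-value in the upper half of the
# curve order with its complement. The pure-Python helper below demonstrates
# that normalisation without touching OpenSSL.
def _normalize_s_example(s_value):
    """Return the canonical (low) s for a secp256k1 ECDSA signature."""
    if s_value > SECP256K1_ORDER_HALF:
        return SECP256K1_ORDER - s_value
    return s_value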
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
# Always have represent as b'<secret>' so test cases don't have to
# change for py2/3
if sys.version > '3':
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
else:
return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
| 36.381974
| 130
| 0.687389
|
663c64b471cba32f2d75da3d7fc52c95bc2729eb
| 647
|
py
|
Python
|
Codefights/arcade/python-arcade/level-9/57.Correct-Lineup/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codefights/arcade/python-arcade/level-9/57.Correct-Lineup/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codefights/arcade/python-arcade/level-9/57.Correct-Lineup/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python3
from solution1 import correctLineup as f
qa = [
([1, 2, 3, 4, 5, 6],
[2, 1, 4, 3, 6, 5]),
([13, 42],
[42, 13]),
([2, 3, 1, 100, 99, 45, 22, 28],
[3, 2, 100, 1, 45, 99, 28, 22]),
([85, 32, 45, 67, 32, 12, 45, 67],
[32, 85, 67, 45, 12, 32, 67, 45]),
([60, 2, 24, 40],
[2, 60, 40, 24])
]
for *q, a in qa:
for i, e in enumerate(q):
print('input{0}: {1}'.format(i + 1, e))
ans = f(*q)
if ans != a:
print(' [failed]')
print(' output:', ans)
print(' expected:', a)
else:
print(' [ok]')
print(' output:', ans)
print()
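# Illustrative sketch (not part of the original file): judging from the
# fixtures above, correctLineup swaps each adjacent pair of athletes. A
# minimal reference implementation consistent with every test case is:
def correctLineup_reference(athletes):
    result = list(athletes)
    for i in range(0, len(result) - 1, 2):
        result[i], result[i + 1] = result[i + 1], result[i]
    return result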
| 21.566667
| 47
| 0.409583
|
c4aa585be474782594a907ab6ed76b4ebec08d4e
| 18,553
|
py
|
Python
|
src/robot/variables/variables.py
|
VegiS/robotframework
|
32dddb38f9d812497f9a89436e19925e4dbec299
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/variables/variables.py
|
VegiS/robotframework
|
32dddb38f9d812497f9a89436e19925e4dbec299
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/variables/variables.py
|
VegiS/robotframework
|
32dddb38f9d812497f9a89436e19925e4dbec299
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-10-30T15:40:09.000Z
|
2018-10-30T15:40:09.000Z
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import re
import inspect
from functools import partial
from contextlib import contextmanager
from UserDict import UserDict
try:
from java.lang.System import getProperty as getJavaSystemProperty
from java.util import Map
except ImportError:
getJavaSystemProperty = lambda name: None
class Map: pass
from robot import utils
from robot.errors import DataError
from robot.output import LOGGER
from .isvar import is_var, is_scalar_var, is_list_var
from .variablesplitter import VariableSplitter
class Variables(utils.NormalizedDict):
"""Represents a set of variables including both ${scalars} and @{lists}.
Contains methods for replacing variables from lists, scalars, and strings.
In addition to ${scalar} and @{list} variables, these methods also handle
%{environment} variables.
"""
_extended_var_re = re.compile(r'''
^\${ # start of the string and "${"
(.+?) # base name (group 1)
([^\s\w].+) # extended part (group 2)
}$ # "}" and end of the string
''', re.UNICODE|re.VERBOSE)
def __init__(self, identifiers=('$', '@', '%', '&', '*')):
utils.NormalizedDict.__init__(self, ignore='_')
self._identifiers = identifiers
importer = utils.Importer('variable file').import_class_or_module_by_path
self._import_variable_file = partial(importer, instantiate_with_args=())
def __setitem__(self, name, value):
self._validate_var_name(name)
utils.NormalizedDict.__setitem__(self, name, value)
def update(self, dict=None, **kwargs):
if dict:
self._validate_var_dict(dict)
UserDict.update(self, dict)
for key in dict:
self._add_key(key)
if kwargs:
self.update(kwargs)
def __getitem__(self, name):
self._validate_var_name(name)
try:
return self._find_variable(name)
except KeyError:
try:
return self._get_number_var(name)
except ValueError:
try:
return self._get_list_var_as_scalar(name)
except ValueError:
try:
return self._get_scalar_var_as_list(name)
except ValueError:
try:
return self._get_extended_var(name)
except ValueError:
self._raise_non_existing_variable(name)
def _find_variable(self, name):
variable = utils.NormalizedDict.__getitem__(self, name)
return self._solve_delayed(name, variable)
def _raise_non_existing_variable(self, name, msg=None, env_vars=False):
_raise_not_found(name, self.keys(), msg, env_vars=env_vars)
def _solve_delayed(self, name, value):
if isinstance(value, DelayedVariable):
return value.resolve(name, self)
return value
def resolve_delayed(self):
# cannot iterate over `self` here because loop changes the state.
for var in self.keys():
try:
self[var] # getting variable resolves it if needed
except DataError:
pass
def _validate_var_name(self, name):
if not is_var(name):
msg = "Variable name '%s' is invalid." % name
self._raise_non_existing_variable(name, msg=msg)
def _validate_var_dict(self, dict):
for name in dict:
self._validate_var_name(name)
def _get_list_var_as_scalar(self, name):
if not is_scalar_var(name):
raise ValueError
name = '@'+name[1:]
try:
return self._find_variable(name)
except KeyError:
return self._get_extended_var(name)
def _get_scalar_var_as_list(self, name):
if not is_list_var(name):
raise ValueError
name = '$'+name[1:]
try:
value = self._find_variable(name)
except KeyError:
value = self._get_extended_var(name)
if not utils.is_list_like(value):
raise DataError("Using scalar variable '%s' as list variable '@%s' "
"requires its value to be list or list-like."
% (name, name[1:]))
return value
def _get_extended_var(self, name):
err_pre = "Resolving variable '%s' failed: " % name
res = self._extended_var_re.search(name)
if res is None:
raise ValueError
base_name = res.group(1)
expression = res.group(2)
try:
variable = self['${%s}' % base_name]
except DataError, err:
raise DataError(err_pre + unicode(err))
try:
return eval('_BASE_VAR_' + expression, {'_BASE_VAR_': variable})
except:
raise DataError(err_pre + utils.get_error_message())
def _get_number_var(self, name):
if name[0] != '$':
raise ValueError
number = self._normalize(name)[2:-1]
try:
return self._get_int_var(number)
except ValueError:
return float(number)
def _get_int_var(self, number):
bases = {'0b': 2, '0o': 8, '0x': 16}
if number.startswith(tuple(bases)):
return int(number[2:], bases[number[:2]])
return int(number)
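# Editorial examples of the number-variable handling above: '${42}' -> 42,
# '${0x1A}' -> 26, '${0b101}' -> 5, and '${3.14}' -> 3.14 via the float fallback
# in _get_number_var.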
def replace_list(self, items, replace_until=None):
"""Replaces variables from a list of items.
If an item in a list is a @{list} variable its value is returned.
Possible variables from other items are replaced using 'replace_scalar'.
Result is always a list.
'replace_until' can be used to limit replacing arguments to a certain
index from the beginning. Used with Run Keyword variants that only
want to resolve some of the arguments in the beginning and pass others
to called keywords unmodified.
"""
items = list(items or [])
if replace_until is not None:
return self._replace_list_until(items, replace_until)
return self._replace_list(items)
def _replace_list_until(self, items, replace_until):
# @{list} variables can contain more or less arguments than needed.
# Therefore we need to go through arguments one by one.
processed = []
while len(processed) < replace_until and items:
processed.extend(self._replace_list([items.pop(0)]))
# If @{list} variable is opened, arguments going further must be
# escaped to prevent them being un-escaped twice.
if len(processed) > replace_until:
processed[replace_until:] = [self._escape(item)
for item in processed[replace_until:]]
return processed + items
def _escape(self, item):
item = utils.escape(item)
# Escape also special syntax used by Run Kw If and Run Kws.
if item in ('ELSE', 'ELSE IF', 'AND'):
item = '\\' + item
return item
def _replace_list(self, items):
results = []
for item in items:
listvar = self._replace_variables_inside_possible_list_var(item)
if listvar:
results.extend(self[listvar])
else:
results.append(self.replace_scalar(item))
return results
def _replace_variables_inside_possible_list_var(self, item):
if not (isinstance(item, basestring) and
item.startswith('@{') and item.endswith('}')):
return None
var = VariableSplitter(item, self._identifiers)
if var.start != 0 or var.end != len(item):
return None
return '@{%s}' % var.get_replaced_base(self)
def replace_scalar(self, item):
"""Replaces variables from a scalar item.
If the item is not a string it is returned as is. If it is a ${scalar}
variable its value is returned. Otherwise variables are replaced with
'replace_string'. Result may be any object.
"""
if self._cannot_have_variables(item):
return utils.unescape(item)
var = VariableSplitter(item, self._identifiers)
if var.identifier and var.base and var.start == 0 and var.end == len(item):
return self._get_variable(var)
return self.replace_string(item, var)
def _cannot_have_variables(self, item):
return not (isinstance(item, basestring) and '{' in item)
def replace_string(self, string, splitter=None, ignore_errors=False):
"""Replaces variables from a string. Result is always a string."""
if self._cannot_have_variables(string):
return utils.unescape(string)
result = []
if splitter is None:
splitter = VariableSplitter(string, self._identifiers)
while True:
if splitter.identifier is None:
result.append(utils.unescape(string))
break
result.append(utils.unescape(string[:splitter.start]))
try:
value = self._get_variable(splitter)
except DataError:
if not ignore_errors:
raise
value = string[splitter.start:splitter.end]
if not isinstance(value, unicode):
value = utils.unic(value)
result.append(value)
string = string[splitter.end:]
splitter = VariableSplitter(string, self._identifiers)
result = ''.join(result)
return result
def _get_variable(self, var):
"""'var' is an instance of a VariableSplitter"""
# 1) Handle reserved syntax
if var.identifier not in '$@%':
value = '%s{%s}' % (var.identifier, var.base)
LOGGER.warn("Syntax '%s' is reserved for future use. Please "
"escape it like '\\%s'." % (value, value))
return value
# 2) Handle environment variables and Java system properties
elif var.identifier == '%':
name = var.get_replaced_base(self).strip()
if not name:
return '%%{%s}' % var.base
value = utils.get_env_var(name)
if value is not None:
return value
value = getJavaSystemProperty(name)
if value is not None:
return value
msg = "Environment variable '%%{%s}' not found." % name
self._raise_non_existing_variable('%%{%s}' % name, msg,
env_vars=True)
# 3) Handle ${scalar} variables and @{list} variables without index
elif var.index is None:
name = '%s{%s}' % (var.identifier, var.get_replaced_base(self))
return self[name]
# 4) Handle items from list variables e.g. @{var}[1]
else:
try:
index = int(self.replace_string(var.index))
name = '@{%s}' % var.get_replaced_base(self)
return self[name][index]
except (ValueError, DataError, IndexError):
msg = ("Variable '@{%s}[%s]' not found."
% (var.base, var.index))
self._raise_non_existing_variable(var.base, msg)
def set_from_file(self, path, args=None, overwrite=False):
LOGGER.info("Importing variable file '%s' with args %s" % (path, args))
var_file = self._import_variable_file(path)
try:
variables = self._get_variables_from_var_file(var_file, args)
self._set_from_file(variables, overwrite)
except:
amsg = 'with arguments %s ' % utils.seq2str2(args) if args else ''
raise DataError("Processing variable file '%s' %sfailed: %s"
% (path, amsg, utils.get_error_message()))
return variables
# This can be used with variables obtained from set_from_file directly to
# prevent importing the same file multiple times
def _set_from_file(self, variables, overwrite=False):
list_prefix = 'LIST__'
for name, value in variables:
if name.startswith(list_prefix):
name = '@{%s}' % name[len(list_prefix):]
try:
if isinstance(value, basestring):
raise TypeError
value = list(value)
except TypeError:
raise DataError("List variable '%s' cannot get a non-list "
"value '%s'" % (name, utils.unic(value)))
else:
name = '${%s}' % name
if overwrite or not self.contains(name):
self.set(name, value)
def set_from_variable_table(self, variables, overwrite=False):
for var in variables:
if not var:
continue
try:
name, value = self._get_var_table_name_and_value(
var.name, var.value, var.report_invalid_syntax)
if overwrite or not self.contains(name):
self.set(name, value)
except DataError, err:
var.report_invalid_syntax(err)
def _get_var_table_name_and_value(self, name, value, error_reporter):
self._validate_var_name(name)
if is_scalar_var(name) and isinstance(value, basestring):
value = [value]
else:
self._validate_var_is_not_scalar_list(name, value)
value = [self._unescape_leading_trailing_spaces(cell) for cell in value]
return name, DelayedVariable(value, error_reporter)
def _unescape_leading_trailing_spaces(self, item):
if item.endswith(' \\'):
item = item[:-1]
if item.startswith('\\ '):
item = item[1:]
return item
def _validate_var_is_not_scalar_list(self, name, value):
if is_scalar_var(name) and len(value) > 1:
raise DataError("Creating a scalar variable with a list value in "
"the Variable table is no longer possible. "
"Create a list variable '@%s' and use it as a "
"scalar variable '%s' instead." % (name[1:], name))
def _get_variables_from_var_file(self, var_file, args):
variables = self._get_dynamical_variables(var_file, args or ())
if variables is not None:
return variables
names = self._get_static_variable_names(var_file)
return self._get_static_variables(var_file, names)
def _get_dynamical_variables(self, var_file, args):
get_variables = getattr(var_file, 'get_variables', None)
if not get_variables:
get_variables = getattr(var_file, 'getVariables', None)
if not get_variables:
return None
variables = get_variables(*args)
if utils.is_dict_like(variables):
return variables.items()
if isinstance(variables, Map):
return [(entry.key, entry.value) for entry in variables.entrySet()]
raise DataError("Expected mapping but %s returned %s."
% (get_variables.__name__, type(variables).__name__))
def _get_static_variable_names(self, var_file):
names = [attr for attr in dir(var_file) if not attr.startswith('_')]
if hasattr(var_file, '__all__'):
names = [name for name in names if name in var_file.__all__]
return names
def _get_static_variables(self, var_file, names):
variables = [(name, getattr(var_file, name)) for name in names]
if not inspect.ismodule(var_file):
variables = [var for var in variables if not callable(var[1])]
return variables
def has_key(self, variable):
try:
self[variable]
except DataError:
return False
else:
return True
__contains__ = has_key
def contains(self, variable, extended=False):
if extended:
return self.has_key(variable)
return utils.NormalizedDict.has_key(self, variable)
class DelayedVariable(object):
def __init__(self, value, error_reporter):
self._value = value
self._error_reporter = error_reporter
self._resolving = False
def resolve(self, name, variables):
try:
value = self._resolve(name, variables)
except DataError, err:
variables.pop(name)
self._error_reporter(unicode(err))
msg = "Variable '%s' not found." % name
_raise_not_found(name, variables, msg)
variables[name] = value
return value
def _resolve(self, name, variables):
with self._avoid_recursion:
if is_list_var(name):
return variables.replace_list(self._value)
return variables.replace_scalar(self._value[0])
@property
@contextmanager
def _avoid_recursion(self):
if self._resolving:
raise DataError('Recursive variable definition.')
self._resolving = True
try:
yield
finally:
self._resolving = False
def _raise_not_found(name, candidates, msg=None, env_vars=False):
"""Raise DataError for missing variable name.
Return recommendations for similar variable names if any are found.
"""
if msg is None:
msg = "Variable '%s' not found." % name
if env_vars:
candidates += ['%%{%s}' % var for var in
utils.get_env_vars()]
normalizer = partial(utils.normalize, ignore='$@%&*{}_', caseless=True,
spaceless=True)
finder = utils.RecommendationFinder(normalizer)
recommendations = finder.find_recommendations(name, candidates)
msg = finder.format_recommendations(msg, recommendations)
raise DataError(msg)
| 38.732777
| 83
| 0.598879
|
eaf3909ef9e912fde1bc9d6fd955646cc8ea6bd9
| 4,327
|
py
|
Python
|
ivy/functional/backends/jax/elementwise.py
|
stolensharingan/ivy
|
57b79ca51bb8d7eb43e72a2e0e03345c5e5cb56e
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/jax/elementwise.py
|
stolensharingan/ivy
|
57b79ca51bb8d7eb43e72a2e0e03345c5e5cb56e
|
[
"Apache-2.0"
] | 1
|
2022-03-08T13:29:20.000Z
|
2022-03-08T13:29:20.000Z
|
ivy/functional/backends/jax/elementwise.py
|
stolensharingan/ivy
|
57b79ca51bb8d7eb43e72a2e0e03345c5e5cb56e
|
[
"Apache-2.0"
] | null | null | null |
# global
import jax
import jax.numpy as jnp
# local
from ivy.functional.backends.jax import JaxArray
def expm1(x: JaxArray)\
-> JaxArray:
return jnp.expm1(x)
def bitwise_invert(x: JaxArray)\
-> JaxArray:
return jnp.bitwise_not(x)
def bitwise_and(x1: JaxArray,
x2: JaxArray)\
-> JaxArray:
return jnp.bitwise_and(x1, x2)
def ceil(x: JaxArray)\
-> JaxArray:
if 'int' in str(x.dtype):
return x
return jnp.ceil(x)
def floor(x: JaxArray)\
-> JaxArray:
if 'int' in str(x.dtype):
return x
return jnp.floor(x)
def isfinite(x: JaxArray)\
-> JaxArray:
return jnp.isfinite(x)
def asin(x: JaxArray)\
-> JaxArray:
return jnp.arcsin(x)
def isinf(x: JaxArray)\
-> JaxArray:
return jnp.isinf(x)
def equal(x1: JaxArray, x2: JaxArray)\
-> JaxArray:
return x1 == x2
def greater_equal(x1: JaxArray, x2: JaxArray)\
-> JaxArray:
return jnp.greater_equal(x1, x2)
def less_equal(x1: JaxArray, x2: JaxArray)\
-> JaxArray:
return x1 <= x2
def asinh(x: JaxArray)\
-> JaxArray:
return jnp.arcsinh(x)
def sqrt(x: JaxArray)\
-> JaxArray:
return jnp.sqrt(x)
def cosh(x: JaxArray)\
-> JaxArray:
return jnp.cosh(x)
def log10(x: JaxArray)\
-> JaxArray:
return jnp.log10(x)
def log(x: JaxArray)\
-> JaxArray:
return jnp.log(x)
def log2(x: JaxArray)\
-> JaxArray:
return jnp.log2(x)
def log1p(x: JaxArray)\
-> JaxArray:
return jnp.log1p(x)
def isnan(x: JaxArray)\
-> JaxArray:
return jnp.isnan(x)
def less(x1: JaxArray, x2: JaxArray)\
-> JaxArray:
return jnp.less(x1, x2)
def cos(x: JaxArray)\
-> JaxArray:
return jnp.cos(x)
def logical_xor(x1: JaxArray, x2: JaxArray)\
-> JaxArray:
return jnp.logical_xor(x1, x2)
def logical_or(x1: JaxArray, x2: JaxArray)\
-> JaxArray:
return jnp.logical_or(x1, x2)
def logical_and(x1: JaxArray, x2: JaxArray)\
-> JaxArray:
return jnp.logical_and(x1, x2)
def logical_not(x: JaxArray)\
-> JaxArray:
return jnp.logical_not(x)
def acos(x: JaxArray)\
-> JaxArray:
return jnp.arccos(x)
def acosh(x: JaxArray)\
-> JaxArray:
return jnp.arccosh(x)
def sin(x: JaxArray)\
-> JaxArray:
return jnp.sin(x)
def negative(x: JaxArray) -> JaxArray:
return jnp.negative(x)
def not_equal(x1: JaxArray, x2: JaxArray) \
-> JaxArray:
return jnp.not_equal(x1, x2)
def tanh(x: JaxArray)\
-> JaxArray:
return jnp.tanh(x)
def bitwise_or(x1: JaxArray, x2: JaxArray) -> JaxArray:
if isinstance(x1,int):
if x1 > 9223372036854775807:
x1 = jnp.array(x1,dtype='uint64')
if isinstance(x2,int):
if x2 > 9223372036854775807:
x2 = jnp.array(x2,dtype='uint64')
return jnp.bitwise_or(x1, x2)
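# Editorial note: the explicit uint64 casts above appear to guard Python ints larger
# than the int64 maximum (9223372036854775807 == 2**63 - 1), which would otherwise
# overflow when JAX converts the operand to a default integer array, e.g. an assumed
# call like bitwise_or(2**63, 1).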
def sinh(x: JaxArray)\
-> JaxArray:
return jnp.sinh(x)
def positive(x: JaxArray)\
-> JaxArray:
return jnp.positive(x)
def square(x: JaxArray)\
-> JaxArray:
return jnp.square(x)
def remainder(x1: JaxArray, x2: JaxArray)\
-> JaxArray:
return jnp.remainder(x1, x2)
def round(x: JaxArray)\
-> JaxArray:
if 'int' in str(x.dtype):
return x
return jnp.round(x)
def abs(x: JaxArray)\
-> JaxArray:
return jnp.absolute(x)
def subtract(x1: JaxArray, x2: JaxArray)\
-> JaxArray:
if hasattr(x1, 'dtype') and hasattr(x2, 'dtype'):
promoted_type = jnp.promote_types(x1.dtype, x2.dtype)
x1 = x1.astype(promoted_type)
x2 = x2.astype(promoted_type)
return jnp.subtract(x1, x2)
def logaddexp(x1: JaxArray, x2: JaxArray) -> JaxArray:
return jnp.logaddexp(x1, x2)
tan = jnp.tan
def atan(x: JaxArray)\
-> JaxArray:
return jnp.arctan(x)
atan2 = jnp.arctan2
cosh = jnp.cosh
atanh = jnp.arctanh
log = jnp.log
exp = jnp.exp
# Extra #
# ------#
erf = jax.scipy.special.erf
| 17.733607
| 62
| 0.561821
|
44d60444290ea174d77f766dffcc07832ee153c7
| 3,565
|
py
|
Python
|
python/tests/collection/test_set.py
|
tachyonsoftware/algorithms
|
29149a55de173b42dfa461838877fa66d018629f
|
[
"MIT"
] | 17
|
2016-06-29T08:41:51.000Z
|
2022-02-04T13:39:13.000Z
|
python/tests/collection/test_set.py
|
tachyonsoftware/algorithms
|
29149a55de173b42dfa461838877fa66d018629f
|
[
"MIT"
] | null | null | null |
python/tests/collection/test_set.py
|
tachyonsoftware/algorithms
|
29149a55de173b42dfa461838877fa66d018629f
|
[
"MIT"
] | 4
|
2016-05-09T19:02:47.000Z
|
2021-02-22T02:05:41.000Z
|
import unittest
import random
from collection.set import Set
class TestSet(unittest.TestCase):
def setUp(self):
self.set = Set()
def test_constructor(self):
self.assertTrue(self.set.is_empty())
self.assertEquals(0, len(self.set))
def test_one_add(self):
element = 'foo'
self.set.add(element)
self.assertFalse(self.set.is_empty())
self.assertEquals(1, len(self.set))
self.assertTrue(self.set.exists(element))
def test_multiple_adds(self):
elements = range(100)
for element in elements:
self.set.add(element)
for element in elements:
self.assertTrue(self.set.exists(element))
self.assertFalse(self.set.is_empty())
self.assertEquals(len(elements), len(self.set))
def test_clear(self):
elements = range(100)
for element in elements:
self.set.add(element)
self.set.clear()
self.test_constructor()
def test_one_remove(self):
element = 'foo'
self.set.add(element)
self.set.remove(element)
self.assertTrue(self.set.is_empty())
self.assertEquals(0, len(self.set))
self.assertFalse(self.set.exists(element))
def test_multiple_removes(self):
elements = range(100)
for element in elements:
self.set.add(element)
for element in elements:
self.set.remove(element)
for element in elements:
self.assertFalse(self.set.exists(element))
self.assertTrue(self.set.is_empty())
self.assertEquals(0, len(self.set))
def test_remove_empty(self):
self.assertRaises(KeyError, self.set.remove, 'foo')
def test_remove_too_many(self):
element = 'foo'
self.set.add(element)
self.set.remove(element)
self.assertRaises(KeyError, self.set.remove, element)
def test_merge_empty_sets(self):
self.assertEquals(Set().merge(Set()), Set())
def test_merge_multiple_elements(self):
lst0 = [1, 2, 3]
lst1 = [11, 12, 13]
lst2 = [13]
set0 = Set(lst0)
set1 = Set(lst1)
set2 = Set(lst2)
self.assertEquals(set0.merge(set1), Set(lst0 + lst1))
self.assertEquals(set1.merge(set2), set1)
def test_diff_empty_sets(self):
self.assertEquals(Set().diff(Set()), Set())
def test_diff_multiple_elements(self):
lst0 = [1, 2, 3]
lst1 = [11, 12, 13]
lst2 = [13]
lst3 = [11, 12]
set0 = Set(lst0)
set1 = Set(lst1)
set2 = Set(lst2)
set3 = Set(lst3)
self.assertEquals(set0.diff(set1), set0)
self.assertEquals(set1.diff(set0), set1)
self.assertEquals(set1.diff(set2), set3)
self.assertEquals(set2.diff(set1), Set())
def test_eq_empty(self):
self.assertTrue(Set() == Set())
def test_eq_not_empty(self):
set0 = Set([1, 2, 3])
set1 = Set([1, 2, 3])
set2 = Set([4, 5, 6])
self.assertTrue(set0 == set0)
self.assertTrue(set1 == set1)
self.assertTrue(set2 == set2)
self.assertFalse(set0 == Set())
self.assertFalse(Set() == set0)
self.assertTrue(set0 == set1)
self.assertTrue(set1 == set0)
self.assertFalse(set0 == set2)
self.assertFalse(set2 == set0)
self.assertFalse(set1 == set2)
self.assertFalse(set2 == set1)
| 26.804511
| 61
| 0.592146
|
23fe43459f2a1bcbad0bf001d7ec6bb9f373c4eb
| 3,146
|
py
|
Python
|
tests/test_sphinx/conftest.py
|
agaric/MyST-Parser
|
8dda2f73364853cb572a50c53434c385db266d7c
|
[
"MIT"
] | null | null | null |
tests/test_sphinx/conftest.py
|
agaric/MyST-Parser
|
8dda2f73364853cb572a50c53434c385db266d7c
|
[
"MIT"
] | null | null | null |
tests/test_sphinx/conftest.py
|
agaric/MyST-Parser
|
8dda2f73364853cb572a50c53434c385db266d7c
|
[
"MIT"
] | null | null | null |
"""
Uses sphinx's pytest fixture to run builds
usage:
.. code-block:: python
@pytest.mark.sphinx(
buildername='html',
srcdir='path/to/source')
def test_basic(app, status, warning, get_sphinx_app_output):
app.build()
assert 'build succeeded' in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
assert warnings == ""
output = get_sphinx_app_output(app, buildername='html')
parameters available to parse to ``@pytest.mark.sphinx``:
- buildername='html'
- srcdir=None
- testroot='root' (only used if srcdir not set)
- freshenv=False
- confoverrides=None
- status=None
- warning=None
- tags=None
- docutilsconf=None
"""
import os
import pathlib
import pickle
import shutil
from docutils.nodes import document
import pytest
from sphinx.testing.path import path
SOURCE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "sourcedirs"))
# TODO autouse not working, may need to be in root conftest
# (ideally _build folder should be in tempdir)
# @pytest.fixture(scope="session", autouse=True)
@pytest.fixture()
def remove_sphinx_builds():
""" remove all build directories from the test folder
"""
yield
srcdirs = pathlib.Path(SOURCE_DIR)
for entry in srcdirs.iterdir(): # type: pathlib.Path
if entry.is_dir() and entry.joinpath("_build").exists():
shutil.rmtree(str(entry.joinpath("_build")))
@pytest.fixture
def get_sphinx_app_output(file_regression):
def read(
app,
buildername="html",
filename="index.html",
encoding="utf-8",
extract_body=False,
remove_scripts=False,
regress_html=False,
):
outpath = path(os.path.join(str(app.srcdir), "_build", buildername, filename))
if not outpath.exists():
raise IOError("no output file exists: {}".format(outpath))
content = outpath.text(encoding=encoding)
if regress_html:
# only regress the inner body, since other sections are non-deterministic
from bs4 import BeautifulSoup
soup = BeautifulSoup(content, "html.parser")
doc_div = soup.findAll("div", {"class": "documentwrapper"})[0]
file_regression.check(doc_div.prettify(), extension=".html")
return content
return read
@pytest.fixture
def get_sphinx_app_doctree(file_regression):
def read(
app,
filename="index.doctree",
folder="doctrees",
encoding="utf-8",
regress=False,
):
outpath = path(os.path.join(str(app.srcdir), "_build", folder, filename))
if not outpath.exists():
raise IOError("no output file exists: {}".format(outpath))
with open(outpath, "rb") as handle:
doctree = pickle.load(handle) # type: document
# convert absolute filenames
for node in doctree.traverse(lambda n: "source" in n):
node["source"] = pathlib.Path(node["source"]).name
if regress:
file_regression.check(doctree.pformat(), extension=".xml")
return doctree
return read
| 26.661017
| 86
| 0.646535
|
e660d8bf0b1a926db169702d5ff0bdc8525b49e1
| 10,949
|
py
|
Python
|
setup_services.py
|
filipmadej/bookclub-foundry-filipnew
|
d20d7f415b8aea505c2dc9fd287ccc29c3b57813
|
[
"MIT"
] | null | null | null |
setup_services.py
|
filipmadej/bookclub-foundry-filipnew
|
d20d7f415b8aea505c2dc9fd287ccc29c3b57813
|
[
"MIT"
] | null | null | null |
setup_services.py
|
filipmadej/bookclub-foundry-filipnew
|
d20d7f415b8aea505c2dc9fd287ccc29c3b57813
|
[
"MIT"
] | 1
|
2018-10-14T03:34:21.000Z
|
2018-10-14T03:34:21.000Z
|
#!/usr/bin/python
#***************************************************************************
# Copyright 2015 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#***************************************************************************
import json
import logging
import logging.handlers
import os
import os.path
import sys
import time
import argparse
from subprocess import call, Popen, PIPE
GLOBALIZATION_SERVICE='IBM Globalization'
GLOBALIZATION_SERVICE_PLAN='Experimental'
GLOBALIZATION_SERVICE_NAME='IBM Globalization'
MACHINE_TRANSLATION_SERVICE='language_translation'
MACHINE_TRANSLATION_PLAN='standard'
MACHINE_TRANSLATION_NAME='mt-watson3'
TWITTER_SERVICE='twitterinsights'
TWITTER_PLAN='Free'
TWITTER_NAME='IBM Insights for Twitter'
DEFAULT_BRIDGEAPP_NAME='pipeline_bridge_app'
Logger=None
# setup logmet logging connection if it's available
def setupLogging ():
logger = logging.getLogger('pipeline')
if os.environ.get('DEBUG'):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# in any case, dump logging to the screen
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
if os.environ.get('DEBUG'):
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO)
logger.addHandler(handler)
return logger
# find the given service in our space, get its service name, or None
# if it's not there yet
def findServiceNameInSpace (service):
command = "cf services"
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if proc.returncode != 0:
Logger.info("Unable to lookup services, error was: " + out)
return None
foundHeader = False
serviceStart = -1
serviceEnd = -1
serviceName = None
for line in out.splitlines():
if (foundHeader == False) and (line.startswith("name")):
# this is the header bar, find out the spacing to parse later
# header is of the format:
#name service plan bound apps last operation
# and the spacing is maintained for following lines
serviceStart = line.find("service")
serviceEnd = line.find("plan")-1
foundHeader = True
elif foundHeader:
# have found the headers, looking for our service
if service in line:
# maybe found it, double check by making
# sure the service is in the right place,
# assuming we can check it
if (serviceStart > 0) and (serviceEnd > 0):
if service in line[serviceStart:serviceEnd]:
# this is the correct line - find the bound app(s)
# if there are any
serviceName = line[:serviceStart]
serviceName = serviceName.strip()
else:
continue
return serviceName
# find a service in our space, and if it's there, get the dashboard
# url for user info on it
def findServiceDashboard (service):
serviceName = findServiceNameInSpace(service)
if serviceName == None:
return None
command = "cf service \"" + serviceName + "\""
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if proc.returncode != 0:
return None
serviceURL = None
for line in out.splitlines():
if line.startswith("Dashboard: "):
serviceURL = line[11:]
else:
continue
return serviceURL
# search cf, find an app in our space bound to the given service, and return
# the app name if found, or None if not
def findBoundAppForService (service):
proc = Popen(["cf services"], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if proc.returncode != 0:
return None
foundHeader = False
serviceStart = -1
serviceEnd = -1
boundStart = -1
boundEnd = -1
boundApp = None
for line in out.splitlines():
if (foundHeader == False) and (line.startswith("name")):
# this is the header bar, find out the spacing to parse later
# header is of the format:
#name service plan bound apps last operation
# and the spacing is maintained for following lines
serviceStart = line.find("service")
serviceEnd = line.find("plan")-1
boundStart = line.find("bound apps")
boundEnd = line.find("last operation")
foundHeader = True
elif foundHeader:
# have found the headers, looking for our service
if service in line:
# maybe found it, double check by making
# sure the service is in the right place,
# assuming we can check it
if (serviceStart > 0) and (serviceEnd > 0) and (boundStart > 0) and (boundEnd > 0):
if service in line[serviceStart:serviceEnd]:
# this is the correct line - find the bound app(s)
# if there are any
boundApp = line[boundStart:boundEnd]
else:
continue
# if we found a binding, make sure we only care about the first one
if boundApp != None:
if boundApp.find(",") >=0 :
boundApp = boundApp[:boundApp.find(",")]
boundApp = boundApp.strip()
if boundApp=="":
boundApp = None
if os.environ.get('DEBUG'):
if boundApp == None:
Logger.debug("No existing apps found bound to service \"" + service + "\"")
else:
Logger.debug("Found existing service \"" + boundApp + "\" bound to service \"" + service + "\"")
return boundApp
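# Editorial illustration of the column parsing above (hypothetical `cf services` row):
#   name service plan bound apps last operation
#   my-gp IBM Globalization Experimental pipeline_bridge_app create succeeded
# The header's column offsets ("service", "plan", "bound apps", "last operation") are
# reused to slice each following row, so the bound app is read positionally rather
# than by splitting on whitespace.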
# look for our default bridge app. if it's not there, create it
def checkAndCreateBridgeApp (appName=DEFAULT_BRIDGEAPP_NAME):
# first look to see if the bridge app already exists
command = "cf apps"
Logger.debug("Executing command \"" + command + "\"")
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if os.environ.get('DEBUG'):
Logger.debug("command \"" + command + "\" returned with rc=" + str(proc.returncode))
Logger.debug("\tstdout was " + out)
Logger.debug("\tstderr was " + err)
if proc.returncode != 0:
return None
for line in out.splitlines():
if line.startswith(appName + " "):
# found it!
return True
# our bridge app isn't around, create it
Logger.info("Bridge app does not exist, attempting to create it")
command = "cf push " + appName + " -i 1 -d mybluemix.net -k 1M -m 64M --no-hostname --no-manifest --no-route --no-start"
Logger.debug("Executing command \"" + command + "\"")
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if os.environ.get('DEBUG'):
Logger.debug("command \"" + command + "\" returned with rc=" + str(proc.returncode))
Logger.debug("\tstdout was " + out)
Logger.debug("\tstderr was " + err)
if proc.returncode != 0:
Logger.info("Unable to create bridge app, error was: " + out)
return False
return True
def checkOrAddService (service, plan, serviceName):
# look to see if we have the service in our space
existingService = findServiceNameInSpace(service)
if existingService == None:
serviceName=serviceName
print "service " + service
print "plan " + plan
print "serviceName " + serviceName
Logger.info("Creating service \"" + service + "\" \"" + plan + "\"" + serviceName )
# if we don't have the service name, it means the tile isn't created in our space, so go
# load it into our space if possible
if existingService == None:
Logger.info("Service \"" + service + "\" is not loaded in this space, attempting to load it")
command = "cf create-service \"" + service + "\" \"" + plan + "\" \"" + serviceName + "\""
Logger.debug("Executing command \"" + command + "\"")
proc = Popen([command],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if proc.returncode != 0:
Logger.info("Unable to create service in this space, error was: " + out)
return None
return serviceName
return existingService
# look for our bridge app to bind this service to. If it's not there,
# attempt to create it. Then bind the service to that app. If it
# all works, return that app name as the bound app
def createBoundAppForService (service, plan, serviceName, appName=DEFAULT_BRIDGEAPP_NAME):
if not checkAndCreateBridgeApp(appName):
return None
print "service " + service
print "plan " + plan
print "serviceName " + serviceName
print "appName " + appName
serviceName=checkOrAddService(service, plan, serviceName)
# now try to bind the service to our bridge app
Logger.info("Binding service \"" + serviceName + "\" to app \"" + appName + "\"")
proc = Popen(["cf bind-service " + appName + " \"" + serviceName + "\""],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if proc.returncode != 0:
Logger.info("Unable to bind service to the bridge app, error was: " + out)
return None
return appName
def setenvvariable(key, value, filename="setenv_globalization.sh"):
keyvalue = 'export %s=%s\n' % (key, value)
open(filename, 'a+').write(keyvalue)
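# Editorial example (hypothetical values): setenvvariable('GAAS_ENDPOINT', 'https://gaas')
# appends the line "export GAAS_ENDPOINT=https://gaas" to setenv_globalization.sh,
# which a later build step could then source.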
# begin main execution sequence
try:
Logger = setupLogging()
parser = argparse.ArgumentParser()
parser.add_argument("--app")
args=parser.parse_args()
if args.app:
appName=args.app
else:
appName=DEFAULT_BRIDGEAPP_NAME
createBoundAppForService(GLOBALIZATION_SERVICE, GLOBALIZATION_SERVICE_PLAN, GLOBALIZATION_SERVICE_NAME,appName)
createBoundAppForService(MACHINE_TRANSLATION_SERVICE, MACHINE_TRANSLATION_PLAN, MACHINE_TRANSLATION_NAME,appName)
createBoundAppForService(TWITTER_SERVICE, TWITTER_PLAN, TWITTER_NAME,appName)
sys.exit(0)
except Exception, e:
Logger.warning("Exception received", exc_info=e)
sys.exit(1)
| 36.375415
| 124
| 0.623984
|
af63ffaf362141f219d73543a798152a642a27e0
| 2,587
|
py
|
Python
|
bookwyrm/tests/views/admin/test_ip_blocklist.py
|
mouse-reeve/fedireads
|
e3471fcc3500747a1b1deaaca662021aae5b08d4
|
[
"CC0-1.0"
] | 270
|
2020-01-27T06:06:07.000Z
|
2020-06-21T00:28:18.000Z
|
bookwyrm/tests/views/admin/test_ip_blocklist.py
|
mouse-reeve/fedireads
|
e3471fcc3500747a1b1deaaca662021aae5b08d4
|
[
"CC0-1.0"
] | 158
|
2020-02-10T20:36:54.000Z
|
2020-06-26T17:12:54.000Z
|
bookwyrm/tests/views/admin/test_ip_blocklist.py
|
mouse-reeve/fedireads
|
e3471fcc3500747a1b1deaaca662021aae5b08d4
|
[
"CC0-1.0"
] | 15
|
2020-02-13T21:53:33.000Z
|
2020-06-17T16:52:46.000Z
|
""" test for app action functionality """
from unittest.mock import patch
from django.template.response import TemplateResponse
from django.test import TestCase
from django.test.client import RequestFactory
from bookwyrm import forms, models, views
from bookwyrm.tests.validate_html import validate_html
class IPBlocklistViews(TestCase):
"""every response to a get request, html or json"""
def setUp(self):
"""we need basic test data and mocks"""
self.factory = RequestFactory()
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.local_user = models.User.objects.create_user(
"mouse@local.com",
"mouse@mouse.mouse",
"password",
local=True,
localname="mouse",
)
models.SiteSettings.objects.create()
def test_blocklist_page_get(self):
"""there are so many views, this just makes sure it LOADS"""
view = views.IPBlocklist.as_view()
request = self.factory.get("")
request.user = self.local_user
request.user.is_superuser = True
result = view(request)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
def test_blocklist_page_post(self):
"""there are so many views, this just makes sure it LOADS"""
view = views.IPBlocklist.as_view()
form = forms.IPBlocklistForm()
form.data["address"] = "0.0.0.0"
request = self.factory.post("", form.data)
request.user = self.local_user
request.user.is_superuser = True
result = view(request)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
block = models.IPBlocklist.objects.get()
self.assertEqual(block.address, "0.0.0.0")
self.assertTrue(block.is_active)
def test_blocklist_page_delete(self):
"""there are so many views, this just makes sure it LOADS"""
block = models.IPBlocklist.objects.create(address="0.0.0.0")
view = views.IPBlocklist.as_view()
request = self.factory.post("")
request.user = self.local_user
request.user.is_superuser = True
view(request, block.id)
self.assertFalse(models.IPBlocklist.objects.exists())
| 34.959459
| 84
| 0.654039
|
fafaa89707e8c9308d81a8b25204778c333f19af
| 2,640
|
py
|
Python
|
zilencer/management/commands/render_messages.py
|
DD2480-group7-2020/zulip
|
9a1e18bcf383c38c35da168563a7345768c6d784
|
[
"Apache-2.0"
] | 1
|
2020-03-17T14:58:50.000Z
|
2020-03-17T14:58:50.000Z
|
zilencer/management/commands/render_messages.py
|
DD2480-group7-2020/zulip
|
9a1e18bcf383c38c35da168563a7345768c6d784
|
[
"Apache-2.0"
] | 2
|
2020-09-07T22:32:24.000Z
|
2021-05-08T18:17:53.000Z
|
zilencer/management/commands/render_messages.py
|
DD2480-group7-2020/zulip
|
9a1e18bcf383c38c35da168563a7345768c6d784
|
[
"Apache-2.0"
] | 1
|
2020-07-16T06:00:10.000Z
|
2020-07-16T06:00:10.000Z
|
import os
from typing import Any, Iterator
import ujson
from django.core.management.base import BaseCommand, CommandParser
from django.db.models import QuerySet
from zerver.lib.message import render_markdown
from zerver.models import Message
def queryset_iterator(queryset: QuerySet, chunksize: int=5000) -> Iterator[Any]:
queryset = queryset.order_by('id')
while queryset.exists():
for row in queryset[:chunksize]:
msg_id = row.id
yield row
queryset = queryset.filter(id__gt=msg_id)
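# Editorial note: ordering by id and re-filtering with id__gt lets the iterator walk
# the table in chunks instead of loading the whole queryset at once. Illustrative flow
# with chunksize=2 over ids [1, 2, 3]: yields 1 and 2 (remembering msg_id=2), then
# re-queries id > 2 and yields 3.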
class Command(BaseCommand):
help = """
Render messages to a file.
Usage: ./manage.py render_messages <destination> [--amount=10000]
"""
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument('destination', help='Destination file path')
parser.add_argument('--amount', default=100000, help='Number of messages to render')
parser.add_argument('--latest_id', default=0, help="Last message id to render")
def handle(self, *args: Any, **options: Any) -> None:
dest_dir = os.path.realpath(os.path.dirname(options['destination']))
amount = int(options['amount'])
latest = int(options['latest_id']) or Message.objects.latest('id').id
self.stdout.write('Latest message id: {latest}'.format(latest=latest))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
with open(options['destination'], 'w') as result:
result.write('[')
messages = Message.objects.filter(id__gt=latest - amount, id__lte=latest).order_by('id')
for message in queryset_iterator(messages):
content = message.content
# In order to ensure that the output of this tool is
# consistent across the time, even if messages are
# edited, we always render the original content
# version, extracting it from the edit history if
# necessary.
if message.edit_history:
history = ujson.loads(message.edit_history)
history = sorted(history, key=lambda i: i['timestamp'])
for entry in history:
if 'prev_content' in entry:
content = entry['prev_content']
break
result.write(ujson.dumps({
'id': message.id,
'content': render_markdown(message, content)
}))
if message.id != latest:
result.write(',')
result.write(']')
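# Editorial example of the edit-history handling above (hypothetical data): for
# edit_history = [{"timestamp": 2, "prev_content": "v1"}, {"timestamp": 5}], entries
# are sorted by timestamp and the first one carrying 'prev_content' wins, so "v1"
# (the original content) is rendered instead of the current message.content.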
| 41.25
| 100
| 0.597348
|
abd147a4f754dbfea8b024cfa0c68b7e31194561
| 9,494
|
py
|
Python
|
Lib/site-packages/plotly/validators/_scattermapbox.py
|
tytanya/my-first-blog
|
2b40adb0816c3546e90ad6ca1e7fb50d924c1536
|
[
"bzip2-1.0.6"
] | 4
|
2020-02-05T11:26:47.000Z
|
2021-05-26T07:48:46.000Z
|
Lib/site-packages/plotly/validators/_scattermapbox.py
|
tytanya/my-first-blog
|
2b40adb0816c3546e90ad6ca1e7fb50d924c1536
|
[
"bzip2-1.0.6"
] | 6
|
2021-03-18T22:27:08.000Z
|
2022-03-11T23:40:50.000Z
|
venv/lib/python3.7/site-packages/plotly/validators/_scattermapbox.py
|
kylenahas/180LoginV1
|
8f64be6e6016d47dff8febfcfa3bbd56e9042f89
|
[
"MIT"
] | 1
|
2020-02-02T21:17:12.000Z
|
2020-02-02T21:17:12.000Z
|
import _plotly_utils.basevalidators
class ScattermapboxValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name='scattermapbox', parent_name='', **kwargs):
super(ScattermapboxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Scattermapbox'),
data_docs=kwargs.pop(
'data_docs', """
connectgaps
Determines whether or not gaps (i.e. {nan} or
missing values) in the provided data arrays are
connected.
customdata
Assigns extra data to each datum. This may be
useful when listening to hover, click and
selection events. Note that, "scatter" traces
also appends customdata items in the markers
DOM elements
customdatasrc
Sets the source reference on plot.ly for
customdata .
fill
Sets the area to fill with a solid color. Use
with `fillcolor` if not "none". "toself"
connects the endpoints of the trace (or each
segment of the trace if it has gaps) into a
closed shape.
fillcolor
Sets the fill color. Defaults to a half-
transparent variant of the line color, marker
color, or marker line color, whichever is
available.
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on plot.ly for
hoverinfo .
hoverlabel
plotly.graph_objs.scattermapbox.Hoverlabel
instance or dict with compatible properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". See https://github.com/d3/d
3-format/blob/master/README.md#locale_format
for details on the formatting syntax. The
variables available in `hovertemplate` are the
ones emitted as event data described at this
link https://plot.ly/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>".
hovertemplatesrc
Sets the source reference on plot.ly for
hovertemplate .
hovertext
Sets hover text elements associated with each
(lon,lat) pair If a single string, the same
string appears over all the data points. If an
array of strings, the items are mapped in order
to this trace's (lon,lat) coordinates. To
be seen, trace `hoverinfo` must contain a
"text" flag.
hovertextsrc
Sets the source reference on plot.ly for
hovertext .
ids
Assigns id labels to each datum. These ids are
used for object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
lat
Sets the latitude coordinates (in degrees
North).
latsrc
Sets the source reference on plot.ly for lat .
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
line
plotly.graph_objs.scattermapbox.Line instance
or dict with compatible properties
lon
Sets the longitude coordinates (in degrees
East).
lonsrc
Sets the source reference on plot.ly for lon .
marker
plotly.graph_objs.scattermapbox.Marker instance
or dict with compatible properties
mode
Determines the drawing mode for this scatter
trace. If the provided `mode` includes "text"
then the `text` elements appear at the
coordinates. Otherwise, the `text` elements
appear on hover.
name
Sets the trace name. The trace name appear as
the legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.scattermapbox.Selected
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected
points. Has an effect only for traces that
support selections. Note that an empty array
means an empty selection where the `unselected`
are turned on for all points, whereas, any
other non-array values means no selection all
where the `selected` and `unselected` styles
have no effect.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
stream
plotly.graph_objs.scattermapbox.Stream instance
or dict with compatible properties
subplot
Sets a reference between this trace's data
coordinates and a mapbox subplot. If "mapbox"
(the default value), the data refer to
`layout.mapbox`. If "mapbox2", the data refer
to `layout.mapbox2`, and so on.
text
Sets text elements associated with each
(lon,lat) pair If a single string, the same
string appears over all the data points. If an
array of strings, the items are mapped in order
to this trace's (lon,lat) coordinates. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be
seen in the hover labels.
textfont
Sets the icon text font
(color=mapbox.layer.paint.text-color,
size=mapbox.layer.layout.text-size). Has an
effect only when `type` is set to "symbol".
textposition
Sets the positions of the `text` elements with
respects to the (x,y) coordinates.
textsrc
Sets the source reference on plot.ly for text
.
uid
Assign an id to this trace. Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
unselected
plotly.graph_objs.scattermapbox.Unselected
instance or dict with compatible properties
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
"""
),
**kwargs
)
| 47
| 78
| 0.550032
|
87279b15298ac664e31ae2317e97eccc01f0501b
| 3,194
|
py
|
Python
|
backslash/lazy_query.py
|
oren0e/backslash-python
|
37f0fe37e21c384baa27b4f5b7210e79d02a65dc
|
[
"BSD-3-Clause"
] | null | null | null |
backslash/lazy_query.py
|
oren0e/backslash-python
|
37f0fe37e21c384baa27b4f5b7210e79d02a65dc
|
[
"BSD-3-Clause"
] | null | null | null |
backslash/lazy_query.py
|
oren0e/backslash-python
|
37f0fe37e21c384baa27b4f5b7210e79d02a65dc
|
[
"BSD-3-Clause"
] | null | null | null |
import collections
import itertools
from sentinels import NOTHING
from ._compat import iteritems
from .utils import raise_for_status
class LazyQuery():
def __init__(self, client, path=None, url=None, query_params=None, page_size=100):
super().__init__()
self._client = client
if url is None:
url = client.api.url
if path is not None:
url = url.add_path(path)
if query_params is not None:
for (param, value) in query_params.items():
url = url.add_query_param(param, str(value))
self._url = url
self._fetched = collections.defaultdict(lambda: NOTHING)
self._total_num_objects = None
self._page_size = page_size
self._typename = None
def all(self):
return list(self)
def filter(self, *filter_objects, **fields):
returned_url = self._url
for filter_object in filter_objects:
returned_url = filter_object.add_to_url(returned_url)
for field_name, field_value in iteritems(fields):
returned_url = returned_url.add_query_param(field_name, str(field_value))
return LazyQuery(self._client, url=returned_url, page_size=self._page_size)
def __repr__(self):
return f'<Query {str(self._url)!r}>'
def __iter__(self):
for i in itertools.count():
item = self._fetch_index(i)
if item is NOTHING:
break
yield item
def __getitem__(self, idx):
if isinstance(idx, slice):
raise NotImplementedError() # pragma: no cover
if idx < 0:
if idx < -len(self):
raise IndexError()
idx = len(self) + idx
returned = self._fetch_index(idx)
if returned is NOTHING:
raise IndexError(idx)
return returned
def _fetch_index(self, index):
returned = self._fetched[index]
if returned is NOTHING:
page_index = (index // self._page_size) + 1
self._fetch_page(page_index)
returned = self._fetched[index]
return returned
def _fetch_page(self, page_index):
assert page_index != 0
response = self._client.api.session.get(self._url.add_query_param(
'page', str(page_index)).add_query_param('page_size', str(self._page_size)))
raise_for_status(response)
response_data = response.json()
keys = [key for key in response_data if key != 'meta']
if len(keys) > 1:
raise RuntimeError('Multiple keys returned')
[obj_typename] = keys
if self._typename is not None and obj_typename != self._typename:
raise RuntimeError(f'Got different typename in query: {obj_typename!r} != {self._typename!r}')
self._typename = obj_typename
for index, json_obj in enumerate(response_data[self._typename]):
real_index = ((page_index - 1) * self._page_size) + index
self._fetched[real_index] = self._client.api.build_api_object(json_obj)
return response_data
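# Editorial illustration of the paging arithmetic above: with page_size=100, item
# index 250 maps to page_index 250 // 100 + 1 == 3, and that page's objects are
# cached at real_index 200..299, so a later __getitem__(251) is served from _fetched.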
def count(self):
self._fetch_index(0)
return self._total_num_objects
| 35.098901
| 106
| 0.623669
|
3f80e93fe57a879c6e3bfe0ff9a10ed78e948e87
| 480
|
py
|
Python
|
pyBrematic/devices/intertechno/__init__.py
|
d-Rickyy-b/Brematic-Controller
|
878ace569ff7df0617a35f595cb74244c21ebb9c
|
[
"MIT"
] | 11
|
2018-05-02T21:31:57.000Z
|
2021-11-09T11:40:47.000Z
|
pyBrematic/devices/intertechno/__init__.py
|
d-Rickyy-b/Brematic-Controller
|
878ace569ff7df0617a35f595cb74244c21ebb9c
|
[
"MIT"
] | 20
|
2018-05-01T14:32:59.000Z
|
2022-02-14T21:53:58.000Z
|
pyBrematic/devices/intertechno/__init__.py
|
d-Rickyy-b/Brematic-Controller
|
878ace569ff7df0617a35f595cb74244c21ebb9c
|
[
"MIT"
] | 5
|
2018-11-08T15:35:48.000Z
|
2020-12-27T18:28:44.000Z
|
# -*- coding: utf-8 -*-
from .intertechnodevice import IntertechnoDevice
from .CMR1000 import CMR1000
from .CMR500 import CMR500
from .CMR300 import CMR300
from .ITL500 import ITL500
from .ITR3500 import ITR3500
from .PAR1500 import PAR1500
from .tools import calc_systemcode, calc_unitcode, calc_system_and_unit_code
__all__ = ["IntertechnoDevice", "CMR1000", "CMR500", "CMR300", "PAR1500", "ITR3500", "ITL500", "calc_systemcode", "calc_unitcode", "calc_system_and_unit_code"]
| 36.923077
| 159
| 0.783333
|
130d3b5030d8696e4d5acbf25ad2a87444dbc08d
| 25,929
|
py
|
Python
|
bayes_opt/transform_gp.py
|
AndRossi/OpenKE_BayesianOpt
|
31db25eb8406c6cf803e2187402290e466c0e824
|
[
"MIT"
] | 2
|
2020-08-01T03:00:24.000Z
|
2020-08-18T02:08:21.000Z
|
bayes_opt/transform_gp.py
|
AndRossi/OpenKE_BayesianOpt
|
31db25eb8406c6cf803e2187402290e466c0e824
|
[
"MIT"
] | null | null | null |
bayes_opt/transform_gp.py
|
AndRossi/OpenKE_BayesianOpt
|
31db25eb8406c6cf803e2187402290e466c0e824
|
[
"MIT"
] | 1
|
2020-08-18T02:08:23.000Z
|
2020-08-18T02:08:23.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 03 12:34:13 2016
@author: V
"""
# define Gaussian Process class
import numpy as np
from bayes_opt.acquisition_functions import AcquisitionFunction, unique_rows
from scipy.optimize import minimize
from sklearn.metrics.pairwise import euclidean_distances
from scipy.spatial.distance import pdist
from scipy.spatial.distance import cdist
#from eucl_dist.cpu_dist import dist
from sklearn.cluster import KMeans
import scipy.linalg as spla
from bayes_opt.acquisition_maximization import acq_max,acq_max_with_name
from scipy.spatial.distance import squareform
class TransformedGP(object):
# transform GP given known optimum value: f = f^* - 1/2 g^2
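# Editorial note on the transformation used below (an interpretation, not a statement
# from the original author): with a known optimum f* and f = f* - g**2 / 2, the
# auxiliary function is g = sqrt(2 * (f* - f)), which is what fit() stores in self.G;
# the GP is then conditioned on G - sqrt(2 * f*), so the prior mean of g is sqrt(2*f*)
# and corresponds to f = 0 far from the data.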
def __init__ (self,param):
# init the model
# lengthscale for the SE/RBF kernel exp(-||x-y||^2 / lengthscale)
if 'kernel' not in param:
param['kernel']='SE'
kernel_name=param['kernel']
if kernel_name not in ['SE','ARD']:
err = "The kernel function " \
"{} has not been implemented, " \
"please choose one of the kernel SE ARD.".format(kernel_name)
raise NotImplementedError(err)
else:
self.kernel_name = kernel_name
if 'flagIncremental' not in param:
self.flagIncremental=0
else:
self.flagIncremental=param['flagIncremental']
if 'lengthscale' not in param:
self.lengthscale=param['theta']
else:
self.lengthscale=param['lengthscale']
self.theta=self.lengthscale
if 'lengthscale_vector' not in param: # for marginalize hyperparameters
self.lengthscale_vector=[]
else:
self.lengthscale_vector=param['lengthscale_vector']
#self.theta=param['theta']
self.gp_params=param
self.nGP=0
# noise delta is for GP version with noise
self.noise_delta=param['noise_delta']
self.KK_x_x=[]
self.KK_x_x_inv=[]
self.fstar=0
self.X=[]
self.Y=[]
self.G=[]
self.lengthscale_old=self.lengthscale
self.flagOptimizeHyperFirst=0
self.alpha=[] # for Cholesky update
self.L=[] # for Cholesky update LL'=A
def kernel_dist(self, a,b,lengthscale):
if self.kernel_name == 'ARD':
return self.ARD_dist_func(a,b,lengthscale)
if self.kernel_name=='SE':
Euc_dist=euclidean_distances(a,b)
return np.exp(-np.square(Euc_dist)/lengthscale)
def ARD_dist_func(self,A,B,length_scale):
mysum=0
for idx,val in enumerate(length_scale):
mysum=mysum+((A[idx]-B[idx])**2)*1.0/val
dist=np.exp(-mysum)
return dist
def fit(self,X,Y,fstar):
"""
Fit Gaussian Process model
Input Parameters
----------
x: the observed points
y: the outcome y=f(x)
"""
ur = unique_rows(X)
X=X[ur]
Y=Y[ur]
self.X=X
self.Y=Y
self.fstar=fstar
self.G=np.sqrt(2.0*(fstar-Y))
#self.G=np.log(1.0*(fstar-Y))
#KK=pdist(self.X,lambda a,b: self.ARD_dist_func(a,b,self.theta))
if self.kernel_name=='SE':
Euc_dist=euclidean_distances(X,X)
self.KK_x_x=np.exp(-np.square(Euc_dist)/self.lengthscale)+np.eye(len(X))*self.noise_delta
else:
KK=pdist(self.X,lambda a,b: self.kernel_dist(a,b,self.lengthscale))
KK=squareform(KK)
self.KK_x_x=KK+np.eye(self.X.shape[0])*(1+self.noise_delta)
#Euc_dist=euclidean_distances(X,X)
#self.KK_x_x=np.exp(-self.theta*np.square(Euc_dist))+self.noise_delta
if np.isnan(self.KK_x_x).any(): #NaN
print("nan in KK_x_x")
self.KK_x_x_inv=np.linalg.pinv(self.KK_x_x)
self.L=np.linalg.cholesky(self.KK_x_x)
#temp=np.linalg.solve(self.L,self.Y)
tempG=np.linalg.solve(self.L,self.G-np.sqrt(2*self.fstar))
#self.alpha=np.linalg.solve(self.L.T,temp)
self.alphaG=np.linalg.solve(self.L.T,tempG)
def log_marginal_lengthscale(self,lengthscale,noise_delta):
"""
Compute Log Marginal likelihood of the GP model w.r.t. the provided lengthscale
"""
def compute_log_marginal(X,lengthscale,noise_delta):
# compute K
ur = unique_rows(self.X)
myX=self.X[ur]
#myY=np.sqrt(0.5*(self.fstar-self.Y[ur]))
myY=self.Y[ur]
if self.flagOptimizeHyperFirst==0:
if self.kernel_name=='SE':
self.Euc_dist_X_X=euclidean_distances(myX,myX)
KK=np.exp(-np.square(self.Euc_dist_X_X)/lengthscale)+np.eye(len(myX))*self.noise_delta
else:
KK=pdist(myX,lambda a,b: self.kernel_dist(a,b,lengthscale))
KK=squareform(KK)
KK=KK+np.eye(myX.shape[0])*(1+noise_delta)
self.flagOptimizeHyperFirst=1
else:
if self.kernel_name=='SE':
KK=np.exp(-np.square(self.Euc_dist_X_X)/lengthscale)+np.eye(len(myX))*self.noise_delta
else:
KK=pdist(myX,lambda a,b: self.kernel_dist(a,b,lengthscale))
KK=squareform(KK)
KK=KK+np.eye(myX.shape[0])*(1+noise_delta)
try:
temp_inv=np.linalg.solve(KK,myY)
except: # singular
return -np.inf
try:
#logmarginal=-0.5*np.dot(self.Y.T,temp_inv)-0.5*np.log(np.linalg.det(KK+noise_delta))-0.5*len(X)*np.log(2*3.14)
first_term=-0.5*np.dot(myY.T,temp_inv)
# if the matrix is too large, we randomly select a part of the data for fast computation
if KK.shape[0]>200:
idx=np.random.permutation(KK.shape[0])
idx=idx[:200]
KK=KK[np.ix_(idx,idx)]
#Wi, LW, LWi, W_logdet = pdinv(KK)
#sign,W_logdet2=np.linalg.slogdet(KK)
chol = spla.cholesky(KK, lower=True)
W_logdet=np.sum(np.log(np.diag(chol)))
# Uses the identity that log det A = log prod diag chol A = sum log diag chol A
#second_term=-0.5*W_logdet2
second_term=-W_logdet
except: # singular
return -np.inf
#print "first term ={:.4f} second term ={:.4f}".format(np.asscalar(first_term),np.asscalar(second_term))
logmarginal=first_term+second_term-0.5*len(myY)*np.log(2*3.14)
if np.isnan(np.asscalar(logmarginal))==True:
print("theta={:s} first term ={:.4f} second term ={:.4f}".format(lengthscale,np.asscalar(first_term),np.asscalar(second_term)))
#print temp_det
return np.asscalar(logmarginal)
#print lengthscale
logmarginal=0
if np.isscalar(lengthscale):
logmarginal=compute_log_marginal(self.X,lengthscale,noise_delta)
return logmarginal
if not isinstance(lengthscale,list) and len(lengthscale.shape)==2:
logmarginal=[0]*lengthscale.shape[0]
for idx in range(lengthscale.shape[0]):
logmarginal[idx]=compute_log_marginal(self.X,lengthscale[idx],noise_delta)
else:
logmarginal=compute_log_marginal(self.X,lengthscale,noise_delta)
#print logmarginal
return logmarginal
def leave_one_out_lengthscale(self,lengthscale,noise_delta):
"""
Compute the leave-one-out predictive log likelihood of the GP model w.r.t. the provided lengthscale
"""
def compute_loo_predictive(X,lengthscale,noise_delta):
# compute K
ur = unique_rows(self.X)
myX=self.X[ur]
myY=self.Y[ur]
D=np.hstack((myX,myY.reshape(-1,1)))
LOO_sum=0
for i in range(0,D.shape[0]):
D_train=np.delete(D,i,0)
D_test=D[i,:]
Xtrain=D_train[:,:-1]
Ytrain=D_train[:,-1]
Xtest=D_test[:-1]
Ytest=D_test[-1]
gp_params= {'theta':lengthscale,'noise_delta':self.noise_delta}
gp=TransformedGP(gp_params)
try: # if SVD problem
gp.fit(Xtrain, Ytrain, self.fstar) # fit() also needs the known optimum value
mu, sigma2 = gp.predict(Xtest, eval_MSE=True)
# Gaussian log predictive density of the held-out point
logpred=-0.5*np.log(2*np.pi*sigma2)-np.square(Ytest-mu)/(2*sigma2)
except:
logpred=-999999
LOO_sum+=logpred
#return np.asscalar(LOO_sum)
return LOO_sum
#print lengthscale
logpred=0
if np.isscalar(lengthscale):
logpred=compute_loo_predictive(self.X,lengthscale,noise_delta)
return logpred
if not isinstance(lengthscale,list) and len(lengthscale.shape)==2:
logpred=[0]*lengthscale.shape[0]
for idx in range(lengthscale.shape[0]):
logpred[idx]=compute_loo_predictive(self.X,lengthscale[idx],noise_delta)
else:
logpred=compute_loo_predictive(self.X,lengthscale,noise_delta)
#print logmarginal
return logpred
def slice_sampling_lengthscale_SE(self,previous_theta,noise_delta,nSamples=10):
print("slice sampling lengthscale")
nBurnins=1
# define a bound on the lengthscale
dim=self.X.shape[1] # self.dim is never set; derive the dimension from the data
bounds_lengthscale_min=0.000001*dim
bounds_lengthscale_max=1*dim
mybounds=np.asarray([bounds_lengthscale_min,bounds_lengthscale_max]).T
count=0
lengthscale_samples=[0]*nSamples
# init x
x0=np.random.uniform(mybounds[0],mybounds[1],1)
# marginal_llk at x0
self.flagOptimizeHyperFirst=0
y_marginal_llk=self.log_marginal_lengthscale(x0,noise_delta)
y=np.random.uniform(0,y_marginal_llk,1)
cut_min=0
count_reject=0
# burnins
while(count<nBurnins and count_reject<=5):
# sampling x
x=np.random.uniform(mybounds[0],mybounds[1],1)
# get f(x)
new_y_marginal_llk=self.log_marginal_lengthscale(x,noise_delta)
if new_y_marginal_llk>=y: # accept
#lengthscale_samples[count]=x
# sampling y
y=np.random.uniform(cut_min,new_y_marginal_llk,1)
cut_min=y
count=count+1
else:
count_reject=count_reject+1
count=0
count_reject=0
while(count<nSamples):
# sampling x
x=np.random.uniform(mybounds[0],mybounds[1],1)
# get f(x)
new_y_marginal_llk=self.log_marginal_lengthscale(x,noise_delta)
if new_y_marginal_llk>=y: # accept
lengthscale_samples[count]=np.asscalar(x)
# sampling y
y=np.random.uniform(cut_min,new_y_marginal_llk,1)
cut_min=y
count=count+1
else:
count_reject=count_reject+1
if count_reject>=3*nSamples:
lengthscale_samples[count:]=[lengthscale_samples[count-1]]*(nSamples-count)
break
#print lengthscale_samples
if not any(lengthscale_samples): # no sample was ever accepted
lengthscale_samples=[previous_theta]*nSamples
return np.asarray(lengthscale_samples)
def optimize_lengthscale_SE_loo(self,previous_theta,noise_delta):
"""
Select the lengthscale that maximizes the leave-one-out predictive likelihood
"""
#print("maximizing lengthscale LOO")
dim=self.X.shape[1]
# define a bound on the lengthscale
bounds_lengthscale_min=0.000001*dim
bounds_lengthscale_max=1*dim
mybounds=[np.asarray([bounds_lengthscale_min,bounds_lengthscale_max]).T]
lengthscale_tries = np.random.uniform(bounds_lengthscale_min, bounds_lengthscale_max,size=(1000*dim, 1))
lengthscale_cluster = KMeans(n_clusters=10*dim, random_state=0).fit(lengthscale_tries)
#print lengthscale_cluster.cluster_centers_
lengthscale_tries=np.vstack((lengthscale_cluster.cluster_centers_,previous_theta,bounds_lengthscale_min))
#print lengthscale_tries
# evaluate
self.flagOptimizeHyperFirst=0 # for efficiency
logmarginal_tries=self.leave_one_out_lengthscale(lengthscale_tries,noise_delta)
#print logmarginal_tries
#find x optimal for init
idx_max=np.argmax(logmarginal_tries)
lengthscale_init_max=lengthscale_tries[idx_max]
#print lengthscale_init_max
myopts ={'maxiter':10,'maxfun':10}
x_max=[]
max_log_marginal=None
for i in range(dim):
res = minimize(lambda x: -self.leave_one_out_lengthscale(x,noise_delta),lengthscale_init_max,
bounds=mybounds,method="L-BFGS-B",options=myopts)#L-BFGS-B
if 'x' not in res:
val=self.leave_one_out_lengthscale(res,noise_delta)
else:
val=self.leave_one_out_lengthscale(res.x,noise_delta)
# Store it if better than previous minimum(maximum).
if max_log_marginal is None or val >= max_log_marginal:
if 'x' not in res:
x_max = res
else:
x_max = res.x
max_log_marginal = val
#print res.x
return x_max
def optimize_lengthscale_SE_maximizing(self,previous_theta,noise_delta):
"""
Select the lengthscale that maximizes the log marginal likelihood
"""
#print("maximizing lengthscale")
dim=self.X.shape[1]
# define a bound on the lengthscale
bounds_lengthscale_min=0.0000001
bounds_lengthscale_max=1*dim
mybounds=[np.asarray([bounds_lengthscale_min,bounds_lengthscale_max]).T]
lengthscale_tries = np.random.uniform(bounds_lengthscale_min, bounds_lengthscale_max,size=(1000*dim, 1))
lengthscale_cluster = KMeans(n_clusters=10*dim, random_state=0).fit(lengthscale_tries)
#print lengthscale_cluster.cluster_centers_
lengthscale_tries=np.vstack((lengthscale_cluster.cluster_centers_,previous_theta,bounds_lengthscale_min))
#print lengthscale_tries
# evaluate
self.flagOptimizeHyperFirst=0 # for efficiency
logmarginal_tries=self.log_marginal_lengthscale(lengthscale_tries,noise_delta)
#print logmarginal_tries
#find x optimal for init
idx_max=np.argmax(logmarginal_tries)
lengthscale_init_max=lengthscale_tries[idx_max]
#print lengthscale_init_max
myopts ={'maxiter':10,'maxfun':10}
x_max=[]
max_log_marginal=None
for i in range(1):
res = minimize(lambda x: -self.log_marginal_lengthscale(x,noise_delta),lengthscale_init_max,
bounds=mybounds,method="L-BFGS-B",options=myopts)#L-BFGS-B
if 'x' not in res:
val=self.log_marginal_lengthscale(res,noise_delta)
else:
val=self.log_marginal_lengthscale(res.x,noise_delta)
# Store it if better than previous minimum(maximum).
if max_log_marginal is None or val >= max_log_marginal:
if 'x' not in res:
x_max = res
else:
x_max = res.x
max_log_marginal = val
#print res.x
return x_max
def optimize_lengthscale(self,previous_theta,noise_delta):
if self.kernel_name == 'ARD':
return self.optimize_lengthscale_ARD(previous_theta,noise_delta)
if self.kernel_name=='SE':
return self.optimize_lengthscale_SE_maximizing(previous_theta,noise_delta)
def compute_var(self,X,xTest):
"""
compute variance given X and xTest
Input Parameters
----------
X: the observed points
xTest: the testing points
Returns
-------
diag(var)
"""
xTest=np.asarray(xTest)
xTest=np.atleast_2d(xTest)
if self.kernel_name=='SE':
#Euc_dist=euclidean_distances(xTest,xTest)
#KK_xTest_xTest=np.exp(-np.square(Euc_dist)/self.lengthscale)+np.eye(xTest.shape[0])*self.noise_delta
ur = unique_rows(X)
X=X[ur]
if xTest.shape[0]<=800:
Euc_dist_test_train=euclidean_distances(xTest,X)
#Euc_dist_test_train=dist(xTest, X, matmul='gemm', method='ext', precision='float32')
KK_xTest_xTrain=np.exp(-np.square(Euc_dist_test_train)/self.lengthscale)
else:
KK_xTest_xTrain=cdist(xTest,X,lambda a,b: self.kernel_dist(a,b,self.lengthscale))
Euc_dist_train_train=euclidean_distances(X,X)
self.KK_bucb_train_train=np.exp(-np.square(Euc_dist_train_train)/self.lengthscale)+np.eye(X.shape[0])*self.noise_delta
else:
#KK=pdist(xTest,lambda a,b: self.kernel_dist(a,b,self.lengthscale))
#KK=squareform(KK)
#KK_xTest_xTest=KK+np.eye(xTest.shape[0])*(1+self.noise_delta)
ur = unique_rows(X)
X=X[ur]
KK_xTest_xTrain=cdist(xTest,X,lambda a,b: self.kernel_dist(a,b,self.lengthscale))
self.KK_bucb_train_train=cdist(X,X,lambda a,b: self.kernel_dist(a,b,self.lengthscale))+np.eye(X.shape[0])*self.noise_delta
try:
temp=np.linalg.solve(self.KK_bucb_train_train,KK_xTest_xTrain.T)
except:
temp=np.linalg.lstsq(self.KK_bucb_train_train,KK_xTest_xTrain.T, rcond=-1)
temp=temp[0]
#var=KK_xTest_xTest-np.dot(temp.T,KK_xTest_xTrain.T)
var=np.eye(xTest.shape[0])-np.dot(temp.T,KK_xTest_xTrain.T)
var=np.diag(var)
var.flags['WRITEABLE']=True
var[var<1e-100]=0
return var
def predict_g2(self,xTest,eval_MSE=True):
"""
compute predictive mean and variance
Input Parameters
----------
xTest: the testing points
Returns
-------
mean, var
"""
if len(xTest.shape)==1: # 1d
xTest=xTest.reshape((-1,self.X.shape[1]))
# prevent singular matrix
ur = unique_rows(self.X)
X=self.X[ur]
Y=self.Y[ur]
G=self.G[ur]
#KK=pdist(xTest,lambda a,b: self.ARD_dist_func(a,b,self.theta))
if self.kernel_name=='SE':
Euc_dist=euclidean_distances(xTest,xTest)
KK_xTest_xTest=np.exp(-np.square(Euc_dist)/self.lengthscale)+np.eye(xTest.shape[0])*self.noise_delta
Euc_dist_test_train=euclidean_distances(xTest,X)
KK_xTest_xTrain=np.exp(-np.square(Euc_dist_test_train)/self.lengthscale)
else:
KK=pdist(xTest,lambda a,b: self.kernel_dist(a,b,self.lengthscale))
KK=squareform(KK)
KK_xTest_xTest=KK+np.eye(xTest.shape[0])+np.eye(xTest.shape[0])*self.noise_delta
KK_xTest_xTrain=cdist(xTest,X,lambda a,b: self.kernel_dist(a,b,self.lengthscale))
"""
temp=np.dot(KK_xTest_xTrain,self.KK_x_x_inv)
mean=np.dot(temp,Y)
var=KK_xTest_xTest-np.dot(temp,KK_xTest_xTrain.T)
"""
# using Cholesky update
#mean=np.dot(KK_xTest_xTrain,self.alpha)
meanG=np.dot(KK_xTest_xTrain,self.alphaG)
#v=np.linalg.solve(self.L,KK_xTest_xTrain.T)
#var=KK_xTest_xTest-np.dot(v.T,v)
v=np.linalg.solve(self.L,KK_xTest_xTrain.T)
varG=KK_xTest_xTest-np.dot(v.T,v)
# compute mF, varF
mf=self.fstar-0.5*meanG*meanG
varf=meanG*varG*meanG
#varf=varG
return mf.ravel(),np.diag(varf)
def predict(self,xTest,eval_MSE=True):
"""
compute predictive mean and variance
Input Parameters
----------
xTest: the testing points
Returns
-------
mean, var
"""
if len(xTest.shape)==1: # 1d
xTest=xTest.reshape((-1,self.X.shape[1]))
# prevent singular matrix
ur = unique_rows(self.X)
X=self.X[ur]
Y=self.Y[ur]
#Gtest=np.log(1.0*(self.fstar-))
#KK=pdist(xTest,lambda a,b: self.ARD_dist_func(a,b,self.theta))
if self.kernel_name=='SE':
Euc_dist=euclidean_distances(xTest,xTest)
KK_xTest_xTest=np.exp(-np.square(Euc_dist)/self.lengthscale)+np.eye(xTest.shape[0])*self.noise_delta
Euc_dist_test_train=euclidean_distances(xTest,X)
KK_xTest_xTrain=np.exp(-np.square(Euc_dist_test_train)/self.lengthscale)
else:
KK=pdist(xTest,lambda a,b: self.kernel_dist(a,b,self.lengthscale))
KK=squareform(KK)
KK_xTest_xTest=KK+np.eye(xTest.shape[0])+np.eye(xTest.shape[0])*self.noise_delta
KK_xTest_xTrain=cdist(xTest,X,lambda a,b: self.kernel_dist(a,b,self.lengthscale))
"""
temp=np.dot(KK_xTest_xTrain,self.KK_x_x_inv)
mean=np.dot(temp,Y)
var=KK_xTest_xTest-np.dot(temp,KK_xTest_xTrain.T)
"""
# using Cholesky update
#mean=np.dot(KK_xTest_xTrain,self.alpha)
meanG=np.dot(KK_xTest_xTrain,self.alphaG)+np.sqrt(2*self.fstar) # non zero prior mean
#v=np.linalg.solve(self.L,KK_xTest_xTrain.T)
#var=KK_xTest_xTest-np.dot(v.T,v)
v=np.linalg.solve(self.L,KK_xTest_xTrain.T)
varG=KK_xTest_xTest-np.dot(v.T,v)
# compute mF, varF
mf=self.fstar-0.5*np.square(meanG)
#mf=self.fstar-np.exp(meanG)
# using linearlisation
varf=meanG*varG*meanG
# using moment matching
"""
temp=np.diag(varG)
temp=np.atleast_2d(temp)
temp=np.reshape(temp,(-1,1))
temp2=np.square(meanG)
temp2=np.atleast_2d(temp2)
temp2=np.reshape(temp2,(-1,1))
mf=self.fstar-0.5*(temp2+temp)
varf=0.5*varG*varG+meanG*varG*meanG
"""
return mf.ravel(),np.diag(varf)
def predict_G(self,xTest,eval_MSE=True):
"""
compute predictive mean and variance
Input Parameters
----------
xTest: the testing points
Returns
-------
mean, var
"""
if len(xTest.shape)==1: # 1d
xTest=xTest.reshape((-1,self.X.shape[1]))
# prevent singular matrix
ur = unique_rows(self.X)
X=self.X[ur]
Y=self.Y[ur]
G=self.G[ur]
#KK=pdist(xTest,lambda a,b: self.ARD_dist_func(a,b,self.theta))
if self.kernel_name=='SE':
Euc_dist=euclidean_distances(xTest,xTest)
KK_xTest_xTest=np.exp(-np.square(Euc_dist)/self.lengthscale)+np.eye(xTest.shape[0])*self.noise_delta
Euc_dist_test_train=euclidean_distances(xTest,X)
KK_xTest_xTrain=np.exp(-np.square(Euc_dist_test_train)/self.lengthscale)
else:
KK=pdist(xTest,lambda a,b: self.kernel_dist(a,b,self.lengthscale))
KK=squareform(KK)
KK_xTest_xTest=KK+np.eye(xTest.shape[0])+np.eye(xTest.shape[0])*self.noise_delta
KK_xTest_xTrain=cdist(xTest,X,lambda a,b: self.kernel_dist(a,b,self.lengthscale))
"""
temp=np.dot(KK_xTest_xTrain,self.KK_x_x_inv)
mean=np.dot(temp,Y)
var=KK_xTest_xTest-np.dot(temp,KK_xTest_xTrain.T)
"""
meanG=np.dot(KK_xTest_xTrain,self.alphaG)+np.sqrt(2*self.fstar) # non zero prior mean
#v=np.linalg.solve(self.L,KK_xTest_xTrain.T)
#var=KK_xTest_xTest-np.dot(v.T,v)
v=np.linalg.solve(self.L,KK_xTest_xTrain.T)
varG=KK_xTest_xTest-np.dot(v.T,v)
return meanG.ravel(),np.diag(varG)
def posterior(self,x):
# compute mean function and covariance function
return self.predict(x)
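# A minimal usage sketch (illustrative only, not part of the original module);
# it assumes a 1-d toy objective whose optimum value fstar is known:
#
#   gp_params = {'kernel': 'SE', 'lengthscale': 0.1, 'noise_delta': 1e-6}
#   gp = TransformedGP(gp_params)
#   X = np.random.uniform(0, 1, size=(10, 1))
#   Y = -np.square(X - 0.5).ravel()      # toy objective, maximum value 0 at x=0.5
#   gp.fit(X, Y, fstar=0)                # fstar is the known optimum value
#   mean_f, var_f = gp.predict(np.atleast_2d(0.3))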
| 36.0125
| 145
| 0.557445
|
7125ba15ba75babbd38fb6a83f111a81f95f73c4
| 83
|
py
|
Python
|
door/apps.py
|
Riphiphip/website
|
dc5bf64f24d5cf78661686af0281705f4d1d2576
|
[
"MIT"
] | null | null | null |
door/apps.py
|
Riphiphip/website
|
dc5bf64f24d5cf78661686af0281705f4d1d2576
|
[
"MIT"
] | null | null | null |
door/apps.py
|
Riphiphip/website
|
dc5bf64f24d5cf78661686af0281705f4d1d2576
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class DoorConfig(AppConfig):
name = 'door'
| 13.833333
| 33
| 0.73494
|
1ce42fd5e95385e39156ef0b6c104cdd842361eb
| 4,123
|
py
|
Python
|
harvesting/woodspinner.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | 23
|
2016-09-07T06:13:37.000Z
|
2022-02-17T23:49:03.000Z
|
harvesting/woodspinner.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | null | null | null |
harvesting/woodspinner.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | 12
|
2016-06-30T17:27:39.000Z
|
2021-12-12T07:54:27.000Z
|
from .abc import HarvestingStrategy
from decimal import Decimal
# https://www.bogleheads.org/forum/viewtopic.php?f=10&t=269091#p4309134
# https://www.bogleheads.org/forum/viewtopic.php?f=10&t=269091#p4310215
# Bucket-1, 2 years in cash/short-term treas
# Bucket-2, 8 year in intermediate treas
# Bucket-3, equities
# Bucket-1+2 never less than 40% of the portfolio
# Bucket-3 can shift between 40-60%
# Buckets refilled yearly
# "for me the key is that my AA can shift within a band of 40/60 an 60/40"
# During Bear Markets the preference will be to spend down the fixed income side and use any excess capacity of the Fixed Income allocation band to buy equities.
# During Bull Markets, the preference will be to spend down from the Equity side and use any excess capacity to buy Fixed Income.
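# A rough numeric illustration of the band (values are made up): with a
# 1,000,000 portfolio and a 70,000 withdrawal, the post-withdrawal value is
# 930,000, so equities are kept between 0.4 * 930,000 = 372,000 and
# 0.6 * 930,000 = 558,000; holdings outside that band are rebalanced back to
# the nearest edge in do_harvest() below.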
class WoodSpinner(HarvestingStrategy):
_StockCeiling = Decimal('0.6')
_StockFloor = Decimal('0.4')
def __init__(self, portfolio):
super().__init__(portfolio)
# we need 8% of the portfolio (2 years @ 4%) in cash
self.portfolio.sell_bonds(self.portfolio.value * Decimal('.08'))
self.high_water = self.portfolio.stocks_real_gain
def is_bear(self):
""" this really needs to account for withdrawals...ugh """
if self.portfolio.stocks_real_gain > self.high_water:
self.high_water = self.portfolio.stocks_real_gain
if self.portfolio.stocks_real_gain <= self.high_water * Decimal('0.8'):
return True
else:
return False
def do_harvest(self, amount):
current_amount = amount
# are we in a bear or a bull?
if self.is_bear():
# first withdraw from cash
cash_amount = min(amount, self.portfolio.cash)
# don't need to do anything here, the withdrawal actually happens elsewhere
#self.portfolio.withdraw_cash(cash_amount)
amount -= cash_amount
# then withdraw from bonds
bond_amount = min(amount, self.portfolio.bonds)
self.portfolio.sell_bonds(bond_amount)
amount -= bond_amount
# withdraw anything else we need from stocks; if we're not in a
# bear market this means withdraw everything from stocks
stock_amount = min(amount, self.portfolio.stocks)
self.portfolio.sell_stocks(stock_amount)
amount -= stock_amount
def get_stock_pct():
if (self.portfolio.value - current_amount) == 0:
return 0
else:
return (self.portfolio.stocks) / (self.portfolio.value - current_amount)
# if we have any money left over after withdrawals we can rebalance
if self.portfolio.value > current_amount:
# we are over 60/40
if get_stock_pct() > self._StockCeiling:
# rebalance into bonds
stock_target = (self.portfolio.value - current_amount) * self._StockCeiling
sell_stocks = self.portfolio.stocks - stock_target
self.portfolio.sell_stocks(sell_stocks)
self.portfolio.buy_bonds(sell_stocks)
# if we are under 40/60
if get_stock_pct() < self._StockFloor:
# rebalance into stocks...this is slightly tricky
# because we need to use bonds+cash to do this
# first use any cash to buy bonds. then we just need
# to rebalance between stocks + bonds.
cash = (self.portfolio.cash - current_amount)
self.portfolio.buy_bonds(cash)
stock_target = (self.portfolio.value - current_amount) * self._StockFloor
buy_stocks = stock_target - self.portfolio.stocks
self.portfolio.sell_bonds(buy_stocks)
self.portfolio.buy_stocks(buy_stocks)
# finally, see if we need to refill the cash bucket...
if (self.portfolio.cash - current_amount) == 0:
sell_bonds = min(self.portfolio.bonds, 2 * current_amount)
self.portfolio.sell_bonds(sell_bonds)
| 42.947917
| 162
| 0.640796
|
764d2955e67ad04964d3da924ae5d37c90b7e727
| 1,347
|
py
|
Python
|
tools/scrape_ab.py
|
EricssonResearch/micro-bench
|
91c2f7cbc5cec8bc4252b24cf4fd124b589bce05
|
[
"Apache-2.0"
] | null | null | null |
tools/scrape_ab.py
|
EricssonResearch/micro-bench
|
91c2f7cbc5cec8bc4252b24cf4fd124b589bce05
|
[
"Apache-2.0"
] | null | null | null |
tools/scrape_ab.py
|
EricssonResearch/micro-bench
|
91c2f7cbc5cec8bc4252b24cf4fd124b589bce05
|
[
"Apache-2.0"
] | null | null | null |
# concurrency level is not needed, timestamp is needed though, also a print out per test, not just the last one
import sys
concurrency = ""
completeRequests = ""
failedRequests = ""
keepAlive = ""
tps = ""
fileName = sys.argv[1]
with open(fileName, "r") as f:
print "concurrency\tcompletedRequests\tfailedRequests\tkeepAlive\ttps"
for line in f:
if line.startswith("Concurrency Level:"):
c = line.strip().split(":")[1]
if c != concurrency:
if concurrency != "" or concurrency == c:
print concurrency + "\t" + completeRequests + "\t" + failedRequests + "\t" + keepAlive + "\t" + tps
concurrency = str(c.strip())
completeRequests = ""
failedRequests = ""
keepAlive = ""
tps = ""
continue
if line.startswith("Complete requests:"):
completeRequests = str(line.strip().split(":")[1].strip())
continue
if line.startswith("Failed requests:"):
failedRequests = str(line.strip().split(":")[1].strip())
continue
if line.startswith("Keep-Alive requests:"):
keepAlive = str(line.strip().split(":")[1].strip())
continue
if line.startswith("Requests per second: "):
tps = str(line.strip().split(" ")[6]).replace(".",",")
continue
# print the last line - let's not forget that
print concurrency + "\t" + completeRequests + "\t" + failedRequests + "\t" + keepAlive + "\t" + tps
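# For reference, the ab output lines this parser expects look roughly like
# (spacing and values are illustrative, not from a real run):
#   Concurrency Level:      10
#   Complete requests:      1000
#   Failed requests:        0
#   Keep-Alive requests:    1000
#   Requests per second:    2500.00 [#/sec] (mean)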
| 29.282609
| 111
| 0.64588
|
a88b54b0b41ac78f67b64b2f80dfeb015a303edd
| 3,314
|
py
|
Python
|
grr/server/grr_response_server/output_plugins/yaml_plugin.py
|
dekoder/grr
|
27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b
|
[
"Apache-2.0"
] | 3
|
2018-09-30T01:31:29.000Z
|
2019-04-22T11:44:54.000Z
|
grr/server/grr_response_server/output_plugins/yaml_plugin.py
|
tomchop/grr
|
27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b
|
[
"Apache-2.0"
] | 1
|
2022-03-02T09:58:05.000Z
|
2022-03-02T09:58:05.000Z
|
grr/server/grr_response_server/output_plugins/yaml_plugin.py
|
tomchop/grr
|
27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Plugins that produce results in YAML."""
from __future__ import unicode_literals
import io
import os
import zipfile
import yaml
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_server import instant_output_plugin
def _SerializeToYaml(value):
preserialized = []
if isinstance(value, rdf_structs.RDFProtoStruct):
preserialized.append(value.ToPrimitiveDict(serialize_leaf_fields=True))
else:
preserialized.append(utils.SmartStr(value))
# Produce a YAML list entry in block format.
# Note that the order of the fields is not guaranteed to correspond to that of
# other output formats.
return yaml.safe_dump(preserialized, default_flow_style=False)
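# For illustration (not part of the plugin): with default_flow_style=False a
# flattened value such as {'foo': 1} comes out as a block-style list entry:
#   yaml.safe_dump([{'foo': 1}], default_flow_style=False) == '- foo: 1\n'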
class YamlInstantOutputPluginWithExportConversion(
instant_output_plugin.InstantOutputPluginWithExportConversion):
"""Instant output plugin that flattens results into YAML."""
plugin_name = "flattened-yaml-zip"
friendly_name = "Flattened YAML (zipped)"
description = "Output ZIP archive with YAML files (flattened)."
output_file_extension = ".zip"
ROW_BATCH = 100
def __init__(self, *args, **kwargs):
super(YamlInstantOutputPluginWithExportConversion, self).__init__(
*args, **kwargs)
self.archive_generator = None # Created in Start()
self.export_counts = {}
@property
def path_prefix(self):
prefix, _ = os.path.splitext(self.output_file_name)
return prefix
def Start(self):
self.archive_generator = utils.StreamingZipGenerator(
compression=zipfile.ZIP_DEFLATED)
self.export_counts = {}
return []
def ProcessSingleTypeExportedValues(self, original_value_type,
exported_values):
first_value = next(exported_values, None)
if not first_value:
return
yield self.archive_generator.WriteFileHeader(
"%s/%s/from_%s.yaml" % (self.path_prefix,
first_value.__class__.__name__,
original_value_type.__name__))
yield self.archive_generator.WriteFileChunk(_SerializeToYaml(first_value))
counter = 1
for batch in utils.Grouper(exported_values, self.ROW_BATCH):
counter += len(batch)
# TODO(hanuszczak): YAML is supposed to be a unicode file format so we
# should use `StringIO` here instead. However, because PyYAML dumps to
# `bytes` instead of `unicode` we have to use `BytesIO`. It should be
# investigated whether there is a way to adjust behaviour of PyYAML.
buf = io.BytesIO()
for value in batch:
buf.write(b"\n")
buf.write(_SerializeToYaml(value))
yield self.archive_generator.WriteFileChunk(buf.getvalue())
yield self.archive_generator.WriteFileFooter()
counts_for_original_type = self.export_counts.setdefault(
original_value_type.__name__, dict())
counts_for_original_type[first_value.__class__.__name__] = counter
def Finish(self):
manifest = {"export_stats": self.export_counts}
yield self.archive_generator.WriteFileHeader(self.path_prefix + "/MANIFEST")
yield self.archive_generator.WriteFileChunk(yaml.safe_dump(manifest))
yield self.archive_generator.WriteFileFooter()
yield self.archive_generator.Close()
| 35.634409
| 80
| 0.726614
|
45e549146ed99eb3a2a458b2eb832385e5f9394a
| 10,556
|
py
|
Python
|
lib/x509.py
|
greenmo000/electrum-britcoin
|
ad0248eb552d29c41ecd06d8b0a1f7f8179d1ee8
|
[
"MIT"
] | null | null | null |
lib/x509.py
|
greenmo000/electrum-britcoin
|
ad0248eb552d29c41ecd06d8b0a1f7f8179d1ee8
|
[
"MIT"
] | null | null | null |
lib/x509.py
|
greenmo000/electrum-britcoin
|
ad0248eb552d29c41ecd06d8b0a1f7f8179d1ee8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Electrum - lightweight Britcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from datetime import datetime
import sys
import util
from util import profiler, print_error
import ecdsa
import hashlib
# algo OIDs
ALGO_RSA_SHA1 = '1.2.840.113549.1.1.5'
ALGO_RSA_SHA256 = '1.2.840.113549.1.1.11'
ALGO_RSA_SHA384 = '1.2.840.113549.1.1.12'
ALGO_RSA_SHA512 = '1.2.840.113549.1.1.13'
ALGO_ECDSA_SHA256 = '1.2.840.10045.4.3.2'
# prefixes, see http://stackoverflow.com/questions/3713774/c-sharp-how-to-calculate-asn-1-der-encoding-of-a-particular-hash-algorithm
PREFIX_RSA_SHA256 = bytearray([0x30,0x31,0x30,0x0d,0x06,0x09,0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x01,0x05,0x00,0x04,0x20])
PREFIX_RSA_SHA384 = bytearray([0x30,0x41,0x30,0x0d,0x06,0x09,0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x02,0x05,0x00,0x04,0x30])
PREFIX_RSA_SHA512 = bytearray([0x30,0x51,0x30,0x0d,0x06,0x09,0x60,0x86,0x48,0x01,0x65,0x03,0x04,0x02,0x03,0x05,0x00,0x04,0x40])
# types used in ASN1 structured data
ASN1_TYPES = {
'BOOLEAN': 0x01,
'INTEGER': 0x02,
'BIT STRING': 0x03,
'OCTET STRING': 0x04,
'NULL': 0x05,
'OBJECT IDENTIFIER': 0x06,
'SEQUENCE': 0x70,
'SET': 0x71,
'PrintableString': 0x13,
'IA5String': 0x16,
'UTCTime': 0x17,
'ENUMERATED': 0x0A,
'UTF8String': 0x0C,
}
class CertificateError(Exception):
pass
# helper functions
def bitstr_to_bytestr(s):
if s[0] != '\x00':
raise BaseException('no padding')
return s[1:]
def bytestr_to_int(s):
i = 0
for char in s:
i <<= 8
i |= ord(char)
return i
def decode_OID(s):
s = map(ord, s)
r = []
r.append(s[0] / 40)
r.append(s[0] % 40)
k = 0
for i in s[1:]:
if i < 128:
r.append(i + 128*k)
k = 0
else:
k = (i - 128) + 128*k
return '.'.join(map(str, r))
def encode_OID(oid):
x = map(int, oid.split('.'))
s = chr(x[0]*40 + x[1])
for i in x[2:]:
ss = chr(i % 128)
while i > 128:
i = i / 128
ss = chr(128 + i % 128) + ss
s += ss
return s
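# Illustrative round trip: the basicConstraints OID '2.5.29.19' encodes to the
# three bytes 0x55 0x1D 0x13 (2*40+5=85, then 29, then 19), so
# decode_OID('\x55\x1d\x13') == '2.5.29.19' and encode_OID('2.5.29.19')
# returns that same byte string.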
class ASN1_Node(str):
def get_node(self, ix):
# return index of first byte, first content byte and last byte.
first = ord(self[ix+1])
if (ord(self[ix+1]) & 0x80) == 0:
length = first
ixf = ix + 2
ixl = ixf + length - 1
else:
lengthbytes = first & 0x7F
length = bytestr_to_int(self[ix+2:ix+2+lengthbytes])
ixf = ix + 2 + lengthbytes
ixl = ixf + length -1
return (ix, ixf, ixl)
def root(self):
return self.get_node(0)
def next_node(self, node):
ixs, ixf, ixl = node
return self.get_node(ixl + 1)
def first_child(self, node):
ixs, ixf, ixl = node
if ord(self[ixs]) & 0x20 != 0x20:
raise BaseException('Can only open constructed types.', hex(ord(self[ixs])))
return self.get_node(ixf)
def is_child_of(node1, node2):
ixs, ixf, ixl = node1
jxs, jxf, jxl = node2
return ( (ixf <= jxs) and (jxl <= ixl) ) or ( (jxf <= ixs) and (ixl <= jxl) )
def get_all(self, node):
# return type + length + value
ixs, ixf, ixl = node
return self[ixs:ixl+1]
def get_value_of_type(self, node, asn1_type):
# verify type byte and return content
ixs, ixf, ixl = node
if ASN1_TYPES[asn1_type] != ord(self[ixs]):
raise BaseException('Wrong type:', hex(ord(self[ixs])), hex(ASN1_TYPES[asn1_type]) )
return self[ixf:ixl+1]
def get_value(self, node):
ixs, ixf, ixl = node
return self[ixf:ixl+1]
def get_children(self, node):
nodes = []
ii = self.first_child(node)
nodes.append(ii)
while ii[2] < node[2]:
ii = self.next_node(ii)
nodes.append(ii)
return nodes
def get_sequence(self):
return map(lambda j: self.get_value(j), self.get_children(self.root()))
def get_dict(self, node):
p = {}
for ii in self.get_children(node):
for iii in self.get_children(ii):
iiii = self.first_child(iii)
oid = decode_OID(self.get_value_of_type(iiii, 'OBJECT IDENTIFIER'))
iiii = self.next_node(iiii)
value = self.get_value(iiii)
p[oid] = value
return p
class X509(object):
def __init__(self, b):
self.bytes = bytearray(b)
der = ASN1_Node(str(b))
root = der.root()
cert = der.first_child(root)
# data for signature
self.data = der.get_all(cert)
# optional version field
if der.get_value(cert)[0] == chr(0xa0):
version = der.first_child(cert)
serial_number = der.next_node(version)
else:
serial_number = der.first_child(cert)
self.serial_number = bytestr_to_int(der.get_value_of_type(serial_number, 'INTEGER'))
# signature algorithm
sig_algo = der.next_node(serial_number)
ii = der.first_child(sig_algo)
self.sig_algo = decode_OID(der.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
# issuer
issuer = der.next_node(sig_algo)
self.issuer = der.get_dict(issuer)
# validity
validity = der.next_node(issuer)
ii = der.first_child(validity)
self.notBefore = der.get_value_of_type(ii, 'UTCTime')
ii = der.next_node(ii)
self.notAfter = der.get_value_of_type(ii, 'UTCTime')
# subject
subject = der.next_node(validity)
self.subject = der.get_dict(subject)
subject_pki = der.next_node(subject)
public_key_algo = der.first_child(subject_pki)
ii = der.first_child(public_key_algo)
self.public_key_algo = decode_OID(der.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
# pubkey modulus and exponent
subject_public_key = der.next_node(public_key_algo)
spk = der.get_value_of_type(subject_public_key, 'BIT STRING')
spk = ASN1_Node(bitstr_to_bytestr(spk))
r = spk.root()
modulus = spk.first_child(r)
exponent = spk.next_node(modulus)
rsa_n = spk.get_value_of_type(modulus, 'INTEGER')
rsa_e = spk.get_value_of_type(exponent, 'INTEGER')
self.modulus = ecdsa.util.string_to_number(rsa_n)
self.exponent = ecdsa.util.string_to_number(rsa_e)
# extensions
self.CA = False
self.AKI = None
self.SKI = None
i = subject_pki
while i[2] < cert[2]:
i = der.next_node(i)
d = der.get_dict(i)
for oid, value in d.items():
value = ASN1_Node(value)
if oid == '2.5.29.19':
# Basic Constraints
self.CA = bool(value)
elif oid == '2.5.29.14':
# Subject Key Identifier
r = value.root()
value = value.get_value_of_type(r, 'OCTET STRING')
self.SKI = value.encode('hex')
elif oid == '2.5.29.35':
# Authority Key Identifier
self.AKI = value.get_sequence()[0].encode('hex')
else:
pass
# cert signature
cert_sig_algo = der.next_node(cert)
ii = der.first_child(cert_sig_algo)
self.cert_sig_algo = decode_OID(der.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
cert_sig = der.next_node(cert_sig_algo)
self.signature = der.get_value(cert_sig)[1:]
def get_keyID(self):
# http://security.stackexchange.com/questions/72077/validating-an-ssl-certificate-chain-according-to-rfc-5280-am-i-understanding-th
return self.SKI if self.SKI else repr(self.subject)
def get_issuer_keyID(self):
return self.AKI if self.AKI else repr(self.issuer)
def get_common_name(self):
return self.subject.get('2.5.4.3', 'unknown')
def get_signature(self):
return self.cert_sig_algo, self.signature, self.data
def check_ca(self):
return self.CA
def check_date(self):
import time
now = time.time()
TIMESTAMP_FMT = '%y%m%d%H%M%SZ'
not_before = time.mktime(time.strptime(self.notBefore, TIMESTAMP_FMT))
not_after = time.mktime(time.strptime(self.notAfter, TIMESTAMP_FMT))
if not_before > now:
raise CertificateError('Certificate has not entered its valid date range.')
if not_after <= now:
raise CertificateError('Certificate has expired.')
def getFingerprint(self):
return hashlib.sha1(self.bytes).digest()
@profiler
def load_certificates(ca_path):
import pem
ca_list = {}
ca_keyID = {}
with open(ca_path, 'r') as f:
s = f.read()
bList = pem.dePemList(s, "CERTIFICATE")
for b in bList:
try:
x = X509(b)
x.check_date()
except BaseException as e:
util.print_error("cert error:", e)
continue
fp = x.getFingerprint()
ca_list[fp] = x
ca_keyID[x.get_keyID()] = fp
return ca_list, ca_keyID
if __name__ == "__main__":
import requests
util.set_verbosity(True)
ca_path = requests.certs.where()
ca_list, ca_keyID = load_certificates(ca_path)
| 31.510448
| 139
| 0.606101
|
7fc14913533160e0e5f63638aff45acdc4b6592c
| 3,781
|
py
|
Python
|
contrib/macdeploy/custom_dsstore.py
|
riancoin/yancoin
|
7390c75a5ba56a0b9395dc38b947878c4d7ad9b8
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
riancoin/yancoin
|
7390c75a5ba56a0b9395dc38b947878c4d7ad9b8
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
riancoin/yancoin
|
7390c75a5ba56a0b9395dc38b947878c4d7ad9b8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00riancoinuser:\x00Documents:\x00riancoin:\x00riancoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/riancoinuser/Documents/riancoin/riancoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Riancoin-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
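# Hypothetical invocation (illustrative only):
#   python custom_dsstore.py "dist/.DS_Store" "Riancoin-Core"
# where the first argument is the output .DS_Store path and the second is the
# volume/package name substituted into the background-image alias above.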
| 61.983607
| 1,817
| 0.72785
|
05a07f14964db79db40228c119b2cdbb0198a1dd
| 5,933
|
py
|
Python
|
package_control/bootstrap.py
|
William-Cao/Less-
|
b4cb03d2457bcac29ba3cfc13689098aee103971
|
[
"Unlicense",
"MIT"
] | 3
|
2019-06-06T00:13:44.000Z
|
2020-08-16T20:11:13.000Z
|
package_control/bootstrap.py
|
Allyn69/package_control
|
f78578ed67529e263fb1f4e4f90f92295830560f
|
[
"MIT",
"Unlicense"
] | null | null | null |
package_control/bootstrap.py
|
Allyn69/package_control
|
f78578ed67529e263fb1f4e4f90f92295830560f
|
[
"MIT",
"Unlicense"
] | 1
|
2021-07-26T00:35:53.000Z
|
2021-07-26T00:35:53.000Z
|
import zipfile
import os
import hashlib
import json
from os import path
try:
from urlparse import urlparse
str_cls = unicode # noqa
from cStringIO import StringIO as BytesIO
package_control_dir = os.getcwd()
except (ImportError) as e:
from urllib.parse import urlparse
str_cls = str
from io import BytesIO
package_control_dir = path.dirname(path.dirname(__file__))
import sublime
from .clear_directory import clear_directory
from .download_manager import downloader
from .downloaders.downloader_exception import DownloaderException
from .console_write import console_write
from . import loader, sys_path
from .open_compat import open_compat, read_compat
from .semver import SemVer
from .file_not_found_error import FileNotFoundError
from .settings import pc_settings_filename
def mark_bootstrapped():
"""
Mark Package Control as successfully bootstrapped
"""
pc_settings = sublime.load_settings(pc_settings_filename())
if not pc_settings.get('bootstrapped'):
pc_settings.set('bootstrapped', True)
sublime.save_settings(pc_settings_filename())
def bootstrap_dependency(settings, url, hash_, priority, version, on_complete):
"""
Downloads a dependency from a hard-coded URL - only used for bootstrapping _ssl
on Linux and ST2/Windows
:param settings:
Package Control settings
:param url:
The non-secure URL to download from
:param hash_:
The sha256 hash of the package file
:param version:
The version number of the package
:param priority:
A three-digit number that controls what order packages are
injected in
:param on_complete:
A callback to be run in the main Sublime thread, so it can use the API
"""
package_filename = path.basename(urlparse(url).path)
package_basename, _ = path.splitext(package_filename)
package_dir = path.join(sys_path.packages_path, package_basename)
version = SemVer(version)
# The package has already been installed. Don't reinstall unless we have
# a newer version.
if path.exists(package_dir) and loader.exists(package_basename):
try:
dep_metadata_path = path.join(package_dir, 'dependency-metadata.json')
with open_compat(dep_metadata_path, 'r') as f:
metadata = json.loads(read_compat(f))
old_version = SemVer(metadata['version'])
if version <= old_version:
sublime.set_timeout(mark_bootstrapped, 10)
return
console_write(
u'''
Upgrading bootstrapped dependency %s to %s from %s
''',
(package_basename, version, old_version)
)
except (KeyError, FileNotFoundError):
# If we can't determine the old version, install the new one
pass
with downloader(url, settings) as manager:
try:
console_write(
u'''
Downloading bootstrapped dependency %s
''',
package_basename
)
data = manager.fetch(url, 'Error downloading bootstrapped dependency %s.' % package_basename)
console_write(
u'''
Successfully downloaded bootstrapped dependency %s
''',
package_basename
)
data_io = BytesIO(data)
except (DownloaderException) as e:
console_write(e)
return
data_hash = hashlib.sha256(data).hexdigest()
if data_hash != hash_:
console_write(
u'''
Error validating bootstrapped dependency %s (got %s instead of %s)
''',
(package_basename, data_hash, hash_)
)
return
try:
data_zip = zipfile.ZipFile(data_io, 'r')
except (zipfile.BadZipfile):
console_write(
u'''
Error unzipping bootstrapped dependency %s
''',
package_filename
)
return
if not path.exists(package_dir):
os.makedirs(package_dir, 0o755)
else:
clear_directory(package_dir)
code = None
for zip_path in data_zip.namelist():
dest = zip_path
if not isinstance(dest, str_cls):
dest = dest.decode('utf-8', 'strict')
dest = dest.replace('\\', '/')
# loader.py is included for backwards compatibility. New code should use
# loader.code with Python inside of it. We no longer use loader.py since
# we can't have any files ending in .py in the root of a package,
# otherwise Sublime Text loads it as a plugin and then the dependency
# path added to sys.path and the package path loaded by Sublime Text
# conflict and there will be errors when Sublime Text tries to
# initialize plugins. By using loader.code, developers can git clone a
# dependency into their Packages folder without issue.
if dest in set([u'loader.py', u'loader.code']):
code = data_zip.read(zip_path).decode('utf-8')
if dest == u'loader.py':
continue
dest = path.join(package_dir, dest)
if dest[-1] == '/':
if not path.exists(dest):
os.makedirs(dest, 0o755)
else:
dest_dir = path.dirname(dest)
if not path.exists(dest_dir):
os.makedirs(dest_dir, 0o755)
with open(dest, 'wb') as f:
f.write(data_zip.read(zip_path))
data_zip.close()
loader.add_or_update(priority, package_basename, code)
console_write(
u'''
Successfully installed bootstrapped dependency %s
''',
package_basename
)
sublime.set_timeout(mark_bootstrapped, 10)
if on_complete:
sublime.set_timeout(on_complete, 100)
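# A hypothetical call, for illustration only (URL, hash, priority and version
# are placeholders, not real values):
#
#   bootstrap_dependency(
#       settings, 'http://example.com/ssl-linux.zip',
#       hash_='0' * 64, priority='010', version='1.0.0', on_complete=None)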
| 30.740933
| 105
| 0.623968
|
202f80dcfefe2f6154658f94fa84ae8317a3803d
| 338
|
py
|
Python
|
metoffice_ec2/timer.py
|
openclimatefix/metoffice_aws_ec2
|
6cda8f583f3ceb4fdc3920e1f13f657039404b8a
|
[
"MIT"
] | 8
|
2020-05-20T20:06:47.000Z
|
2021-06-25T02:43:10.000Z
|
metoffice_ec2/timer.py
|
openclimatefix/metoffice_ec2
|
6cda8f583f3ceb4fdc3920e1f13f657039404b8a
|
[
"MIT"
] | 37
|
2020-05-15T12:20:25.000Z
|
2021-02-12T17:01:33.000Z
|
metoffice_ec2/timer.py
|
openclimatefix/metoffice_aws_ec2
|
6cda8f583f3ceb4fdc3920e1f13f657039404b8a
|
[
"MIT"
] | 2
|
2020-05-21T13:21:00.000Z
|
2020-05-24T09:35:29.000Z
|
import logging
import time
_LOG = logging.getLogger("metoffice_ec2")
class Timer:
def __init__(self):
self.t = time.time()
def tick(self, label=""):
now = time.time()
time_since_last_tick = now - self.t
self.t = now
_LOG.info("{} took {:.2f} secs.".format(label, time_since_last_tick))
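# A minimal usage sketch (not part of the module):
#
#   timer = Timer()
#   download_files()            # hypothetical long-running step
#   timer.tick("download")      # logs e.g. "download took 12.34 secs."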
| 21.125
| 77
| 0.609467
|
01b6d6798a80161fc06aff546a39235e17298653
| 5,760
|
py
|
Python
|
accounts/migrations/0001_initial.py
|
ZviBaratz/pylabber
|
35337284f3d0615249f642743b993b7dad407390
|
[
"Apache-2.0"
] | 3
|
2020-08-28T21:33:07.000Z
|
2021-07-19T17:52:17.000Z
|
accounts/migrations/0001_initial.py
|
TheLabbingProject/pylabber
|
27d6073e7bde871c16912a8ea5e0e389711bbd9f
|
[
"Apache-2.0"
] | 74
|
2019-09-04T11:40:16.000Z
|
2022-01-03T19:43:04.000Z
|
accounts/migrations/0001_initial.py
|
ZviBaratz/pylabber
|
35337284f3d0615249f642743b993b7dad407390
|
[
"Apache-2.0"
] | 3
|
2019-05-07T07:09:05.000Z
|
2019-08-30T15:40:47.000Z
|
# Generated by Django 3.0.8 on 2020-07-12 07:21
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_extensions.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Laboratory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('title', models.CharField(max_length=255, verbose_name='title')),
('description', models.TextField(blank=True, null=True, verbose_name='description')),
('image', models.ImageField(blank=True, null=True, upload_to='images/labs')),
],
options={
'verbose_name_plural': 'Laboratories',
'ordering': ('title',),
},
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, choices=[('BSC', 'B.Sc.'), ('MSC', 'M.Sc.'), ('PHD', 'Ph.D.'), ('PROF', 'Prof.')], default='', max_length=20, null=True)),
('image', models.ImageField(blank=True, upload_to='images/profiles')),
('date_of_birth', models.DateField(blank=True, default=None, null=True)),
('institute', models.CharField(blank=True, max_length=255, null=True)),
('bio', models.TextField(blank=True, max_length=500, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='LaboratoryMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('role', models.CharField(blank=True, choices=[('RA', 'Research Assistant'), ('MSC', 'M.Sc. Student'), ('PHD', 'Ph.D. Candidate'), ('POST', 'Postdoctoral Researcher'), ('AFF', 'Research Affiliate'), ('MAN', 'Lab Manager'), ('PI', 'Principle Investigator')], default='', max_length=20, null=True)),
('laboratory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Laboratory')),
('member', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='laboratory',
name='members',
field=models.ManyToManyField(blank=True, through='accounts.LaboratoryMembership', to=settings.AUTH_USER_MODEL),
),
]
| 64.719101
| 329
| 0.633854
|
ba4d5891155823906291b488e7ac86faa15a23d6
| 713
|
py
|
Python
|
examples/Example.py
|
keshavm021/Chatbot
|
2e52b2aeb4bfbf5295a557f5422cdfdddddf891c
|
[
"MIT"
] | null | null | null |
examples/Example.py
|
keshavm021/Chatbot
|
2e52b2aeb4bfbf5295a557f5422cdfdddddf891c
|
[
"MIT"
] | null | null | null |
examples/Example.py
|
keshavm021/Chatbot
|
2e52b2aeb4bfbf5295a557f5422cdfdddddf891c
|
[
"MIT"
] | null | null | null |
from chatbot import Chat,reflections,multiFunctionCall
import wikipedia,os
import warnings
warnings.filterwarnings("ignore")
def whoIs(query,sessionID="general"):
try:
return wikipedia.summary(query)
except:
for newquery in wikipedia.search(query):
try:
return wikipedia.summary(newquery)
except:
pass
return "I don't know about "+query
call = multiFunctionCall({"whoIs":whoIs})
firstQuestion="Hi, WELCOME TO COLLEGE ENQUIRY SYSTEM?"
chat = Chat(os.path.join(os.path.dirname(os.path.abspath(__file__)),"Example.template"), reflections,call=call)
chat.converse(firstQuestion)
#chat.save_template("test.template")
| 28.52
| 111
| 0.685835
|
1cb117609bd3855dc0128d887743c8b7e1a92b3f
| 187
|
py
|
Python
|
src/npi/dataset/__init__.py
|
NancyFulda/towards-neural-programming-interfaces
|
21b467af56848c4fc8642fb0412f9f8d1b7718a2
|
[
"Apache-2.0"
] | null | null | null |
src/npi/dataset/__init__.py
|
NancyFulda/towards-neural-programming-interfaces
|
21b467af56848c4fc8642fb0412f9f8d1b7718a2
|
[
"Apache-2.0"
] | 1
|
2022-02-01T02:51:51.000Z
|
2022-02-01T02:51:51.000Z
|
src/npi/dataset/__init__.py
|
NancyFulda/towards-neural-programming-interfaces
|
21b467af56848c4fc8642fb0412f9f8d1b7718a2
|
[
"Apache-2.0"
] | 2
|
2022-02-07T16:39:02.000Z
|
2022-03-21T16:08:22.000Z
|
# This package defines the creation/handling/structure of the dataset used to train the NPI
from .construct_dataset import NPIDatasetConstructor
from .npi_dataset import NPIDatasetLoader
| 46.75
| 91
| 0.855615
|
61dc7071240a29407f88c3aeedcfcdbcde274aed
| 6,244
|
py
|
Python
|
extra_tests/snippets/builtin_complex.py
|
rng-dynamics/RustPython
|
6165aadcc4e80e0f48f3e784e17b3c7f80d21a8a
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
extra_tests/snippets/builtin_complex.py
|
rng-dynamics/RustPython
|
6165aadcc4e80e0f48f3e784e17b3c7f80d21a8a
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
extra_tests/snippets/builtin_complex.py
|
rng-dynamics/RustPython
|
6165aadcc4e80e0f48f3e784e17b3c7f80d21a8a
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
import testutils
from testutils import assert_raises
# __abs__
assert abs(complex(3, 4)) == 5
assert abs(complex(3, -4)) == 5
assert abs(complex(1.5, 2.5)) == 2.9154759474226504
# __eq__
assert complex(1, -1) == complex(1, -1)
assert complex(1, 0) == 1
assert 1 == complex(1, 0)
assert complex(1, 1) != 1
assert 1 != complex(1, 1)
assert complex(1, 0) == 1.0
assert 1.0 == complex(1, 0)
assert complex(1, 1) != 1.0
assert 1.0 != complex(1, 1)
assert complex(1, 0) != 1.5
assert not 1.0 != complex(1, 0)
assert bool(complex(1, 0))
assert complex(1, 2) != complex(1, 1)
assert complex(1, 2) != 'foo'
assert complex(1, 2).__eq__('foo') == NotImplemented
assert 1j != 10 ** 1000
# __mul__, __rmul__
assert complex(2, -3) * complex(-5, 7) == complex(11, 29)
assert complex(2, -3) * 5 == complex(10, -15)
assert 5 * complex(2, -3) == complex(2, -3) * 5
# __truediv__, __rtruediv__
assert complex(2, -3) / 2 == complex(1, -1.5)
assert 5 / complex(3, -4) == complex(0.6, 0.8)
# __mod__, __rmod__
# "can't mod complex numbers.
assert_raises(TypeError, lambda: complex(2, -3) % 2)
assert_raises(TypeError, lambda: 2 % complex(2, -3))
# __floordiv__, __rfloordiv__
# can't take floor of complex number.
assert_raises(TypeError, lambda: complex(2, -3) // 2)
assert_raises(TypeError, lambda: 2 // complex(2, -3))
# __divmod__, __rdivmod__
# "can't take floor or mod of complex number."
assert_raises(TypeError, lambda: divmod(complex(2, -3), 2))
assert_raises(TypeError, lambda: divmod(2, complex(2, -3)))
# __pow__, __rpow__
# assert 1j ** 2 == -1
assert complex(1) ** 2 == 1
assert 2 ** complex(2) == 4
# __pos__
assert +complex(0, 1) == complex(0, 1)
assert +complex(1, 0) == complex(1, 0)
assert +complex(1, -1) == complex(1, -1)
assert +complex(0, 0) == complex(0, 0)
# __neg__
assert -complex(1, -1) == complex(-1, 1)
assert -complex(0, 0) == complex(0, 0)
# __bool__
assert bool(complex(0, 0)) is False
assert bool(complex(0, 1)) is True
assert bool(complex(1, 0)) is True
# __hash__
assert hash(complex(1)) == hash(float(1)) == hash(int(1))
assert hash(complex(-1)) == hash(float(-1)) == hash(int(-1))
assert hash(complex(3.14)) == hash(float(3.14))
assert hash(complex(-float('inf'))) == hash(-float('inf'))
assert hash(1j) != hash(1)
# TODO: Find a way to test platform dependent values
assert hash(3.1 - 4.2j) == hash(3.1 - 4.2j)
assert hash(3.1 + 4.2j) == hash(3.1 + 4.2j)
# numbers.Complex
a = complex(3, 4)
b = 4j
assert a.real == 3
assert b.real == 0
assert a.imag == 4
assert b.imag == 4
assert a.conjugate() == 3 - 4j
assert b.conjugate() == -4j
# int and complex addition
assert 1 + 1j == complex(1, 1)
assert 1j + 1 == complex(1, 1)
assert (1j + 1) + 3 == complex(4, 1)
assert 3 + (1j + 1) == complex(4, 1)
# float and complex addition
assert 1.1 + 1.2j == complex(1.1, 1.2)
assert 1.3j + 1.4 == complex(1.4, 1.3)
assert (1.5j + 1.6) + 3 == complex(4.6, 1.5)
assert 3.5 + (1.1j + 1.2) == complex(4.7, 1.1)
# subtraction
assert 1 - 1j == complex(1, -1)
assert 1j - 1 == complex(-1, 1)
assert 2j - 1j == complex(0, 1)
# type error addition
assert_raises(TypeError, lambda: 1j + 'str')
assert_raises(TypeError, lambda: 1j - 'str')
assert_raises(TypeError, lambda: 'str' + 1j)
assert_raises(TypeError, lambda: 'str' - 1j)
# overflow
assert_raises(OverflowError, lambda: complex(10 ** 1000, 0))
assert_raises(OverflowError, lambda: complex(0, 10 ** 1000))
assert_raises(OverflowError, lambda: 0j + 10 ** 1000)
# str/repr
assert '(1+1j)' == str(1+1j)
assert '(1-1j)' == str(1-1j)
assert '(1+1j)' == repr(1+1j)
assert '(1-1j)' == repr(1-1j)
# __getnewargs__
assert (3 + 5j).__getnewargs__() == (3.0, 5.0)
assert (5j).__getnewargs__() == (0.0, 5.0)
class Complex():
def __init__(self, real, imag):
self.real = real
self.imag = imag
def __repr__(self):
return "Com" + str((self.real, self.imag))
def __sub__(self, other):
return Complex(self.real - other, self.imag)
def __rsub__(self, other):
return Complex(other - self.real, -self.imag)
def __eq__(self, other):
return self.real == other.real and self.imag == other.imag
assert Complex(4, 5) - 3 == Complex(1, 5)
assert 7 - Complex(4, 5) == Complex(3, -5)
assert complex("5+2j") == 5 + 2j
assert complex("5-2j") == 5 - 2j
assert complex("-2j") == -2j
assert_raises(TypeError, lambda: complex("5+2j", 1))
assert_raises(ValueError, lambda: complex("abc"))
assert complex("1+10j") == 1+10j
assert complex(10) == 10+0j
assert complex(10.0) == 10+0j
assert complex(10) == 10+0j
assert complex(10+0j) == 10+0j
assert complex(1, 10) == 1+10j
assert complex(1, 10) == 1+10j
assert complex(1, 10.0) == 1+10j
assert complex(1, 10) == 1+10j
assert complex(1, 10) == 1+10j
assert complex(1, 10.0) == 1+10j
assert complex(1.0, 10) == 1+10j
assert complex(1.0, 10) == 1+10j
assert complex(1.0, 10.0) == 1+10j
assert complex(3.14+0j) == 3.14+0j
assert complex(3.14) == 3.14+0j
assert complex(314) == 314.0+0j
assert complex(314) == 314.0+0j
assert complex(3.14+0j, 0j) == 3.14+0j
assert complex(3.14, 0.0) == 3.14+0j
assert complex(314, 0) == 314.0+0j
assert complex(314, 0) == 314.0+0j
assert complex(0j, 3.14j) == -3.14+0j
assert complex(0.0, 3.14j) == -3.14+0j
assert complex(0j, 3.14) == 3.14j
assert complex(0.0, 3.14) == 3.14j
assert complex("1") == 1+0j
assert complex("1j") == 1j
assert complex() == 0
assert complex("-1") == -1
assert complex("+1") == +1
assert complex("(1+2j)") == 1+2j
assert complex("(1.3+2.2j)") == 1.3+2.2j
assert complex("3.14+1J") == 3.14+1j
assert complex(" ( +3.14-6J )") == 3.14-6j
assert complex(" ( +3.14-J )") == 3.14-1j
assert complex(" ( +3.14+j )") == 3.14+1j
assert complex("J") == 1j
assert complex("( j )") == 1j
assert complex("+J") == 1j
assert complex("( -j)") == -1j
assert complex('1e-500') == 0.0 + 0.0j
assert complex('-1e-500j') == 0.0 - 0.0j
assert complex('-1e-500+1e-500j') == -0.0 + 0.0j
# __complex__
def test__complex__():
z = 3 + 4j
assert z.__complex__() == z
assert type(z.__complex__()) == complex
class complex_subclass(complex):
pass
z = complex_subclass(3 + 4j)
assert z.__complex__() == 3 + 4j
assert type(z.__complex__()) == complex
testutils.skip_if_unsupported(3, 11, test__complex__)
| 27.385965
| 66
| 0.634369
|
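The complex-number test block above calls an assert_raises helper and testutils.skip_if_unsupported, neither of which appears in this excerpt. A minimal sketch of what such helpers might look like (hypothetical names and behaviour, not the project's actual testutils module):

import sys

def assert_raises(exc_type, func):
    """Assert that calling func() raises exc_type (or a subclass of it)."""
    try:
        func()
    except exc_type:
        return
    raise AssertionError("expected {} to be raised".format(exc_type.__name__))

def skip_if_unsupported(major, minor, test_func):
    """Run test_func only when the interpreter is at least version (major, minor)."""
    if sys.version_info >= (major, minor):
        test_func()

# Usage mirroring the tests above:
assert_raises(ValueError, lambda: complex("abc"))
skip_if_unsupported(3, 11, lambda: None)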
0ffb1eb27677f679a3e4daafd6d648fa292394a9
| 521
|
py
|
Python
|
kabzimal/models/invocies.py
|
arasumran/kabzimal
|
dbae35fdb940bdf0338bd43983b1894c87a35961
|
[
"MIT"
] | null | null | null |
kabzimal/models/invocies.py
|
arasumran/kabzimal
|
dbae35fdb940bdf0338bd43983b1894c87a35961
|
[
"MIT"
] | 12
|
2020-06-05T23:02:28.000Z
|
2022-03-11T23:59:56.000Z
|
kabzimal/models/invocies.py
|
arasumran/kabzimal
|
dbae35fdb940bdf0338bd43983b1894c87a35961
|
[
"MIT"
] | null | null | null |
from kabzimal.models.order import OrdersModel
__author__ = 'umran'
from django.db import models
class InvoicesModels(models.Model):
    # NOTE: the db_column spellings ('invocies_status_code', 'inocies_date') are kept
    # as-is; they appear to mirror the existing database schema.
    invoices_status_code = models.CharField(db_column='invocies_status_code', max_length=250)
    order = models.ForeignKey(OrdersModel, on_delete=models.CASCADE, db_column='order_id')
    invoices_date = models.DateField(db_column='inocies_date')
    invoice_details = models.CharField(db_column='invoices_details', max_length=500)
class Meta:
db_table = 'kabzimal_invoices'
| 30.647059
| 92
| 0.783109
|
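A hedged usage sketch for the InvoicesModels definition above. It assumes a configured Django project with the kabzimal app installed, applied migrations, and at least one OrdersModel row; the field values are illustrative only.

from datetime import date

from kabzimal.models.order import OrdersModel
from kabzimal.models.invocies import InvoicesModels

order = OrdersModel.objects.first()          # assumes an existing order row

invoice = InvoicesModels.objects.create(
    invoices_status_code="ISSUED",
    order=order,
    invoices_date=date.today(),
    invoice_details="Illustrative invoice row",
)
print(invoice.pk, invoice.invoices_status_code, invoice.invoices_date)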
72834cfd9a4a647bba28651d32c4d8aaae8f0959
| 2,383
|
py
|
Python
|
iupac-codes/iupac.py
|
LongNguyen1984/biofx_python
|
b8d45dc38d968674c6b641051b73f8ed1503b1e4
|
[
"MIT"
] | 1
|
2021-04-21T07:15:27.000Z
|
2021-04-21T07:15:27.000Z
|
iupac-codes/iupac.py
|
LongNguyen1984/biofx_python
|
b8d45dc38d968674c6b641051b73f8ed1503b1e4
|
[
"MIT"
] | null | null | null |
iupac-codes/iupac.py
|
LongNguyen1984/biofx_python
|
b8d45dc38d968674c6b641051b73f8ed1503b1e4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
""" Turn IUPAC DNA codes into regex """
import argparse
import sys
import re
from itertools import product
from typing import NamedTuple, List
class Args(NamedTuple):
pattern: str
# --------------------------------------------------
def get_args() -> Args:
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description='Turn IUPAC DNA codes into regex',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('pattern',
metavar='pattern',
type=str,
help='IUPAC DNA sequence')
args = parser.parse_args()
return Args(args.pattern)
# --------------------------------------------------
def main() -> None:
""" Make a jazz noise here """
args = get_args()
trans = {
'A': ['A'],
'B': ['C', 'G', 'T'],
'C': ['C'],
'D': ['A', 'G', 'T'],
'G': ['G'],
'H': ['A', 'C', 'T'],
'K': ['G', 'T'],
'M': ['A', 'C'],
'N': ['A', 'C', 'G', 'T'],
'R': ['A', 'G'],
'S': ['G', 'C'],
'T': ['T'],
'U': ['U'],
'V': ['A', 'C', 'G'],
'W': ['A', 'T'],
'Y': ['C', 'T']
}
bases = sorted(trans.keys())
if not re.search('^[' + ''.join(bases) + ']+$', args.pattern):
sys.exit(f"Pattern must contain only {', '.join(bases)}.")
iupac = list(map(lambda base: trans[base], args.pattern))
regex = '^' + ''.join(map(make_alternatives, iupac)) + '$'
print('pattern = "{}"'.format(args.pattern))
print('regex = "{}"'.format(regex))
for possibility in sorted(product(*iupac)):
dna = ''.join(possibility)
print(dna, 'OK' if re.search(regex, dna) else 'NO')
# --------------------------------------------------
def make_alternatives(choices: List[str]) -> str:
""" Make alternatives """
n = len(choices)
return '' if n == 0 else choices[0] if n == 1 else f"[{''.join(choices)}]"
# --------------------------------------------------
def test_make_alternatives():
""" Test make_alternatives """
assert make_alternatives([]) == ''
assert make_alternatives(['A']) == 'A'
assert make_alternatives(['A', 'B']) == '[AB]'
# --------------------------------------------------
if __name__ == '__main__':
main()
| 25.902174
| 78
| 0.448175
|
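To see the expansion logic of iupac.py in isolation, here is a small standalone sketch that re-implements make_alternatives and the regex construction for a short pattern (kept separate because the script itself parses sys.argv on run):

import re
from itertools import product

# Trimmed copy of the translation table, only the codes used in this demo.
trans = {'A': ['A'], 'R': ['A', 'G'], 'Y': ['C', 'T']}

def make_alternatives(choices):
    n = len(choices)
    return '' if n == 0 else choices[0] if n == 1 else "[{}]".format(''.join(choices))

pattern = 'ARY'
iupac = [trans[base] for base in pattern]
regex = '^' + ''.join(map(make_alternatives, iupac)) + '$'
print(regex)                                   # ^A[AG][CT]$
for dna in sorted(''.join(p) for p in product(*iupac)):
    print(dna, 'OK' if re.search(regex, dna) else 'NO')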
e1860419811f10048fb371e70f9d17c03f6d65c9
| 5,421
|
py
|
Python
|
utils/pose_estimation.py
|
pedrocg42/awesome-cv-projects
|
928c48aa305d1cd0cd67412659a87ecc6fb6f8b0
|
[
"MIT"
] | null | null | null |
utils/pose_estimation.py
|
pedrocg42/awesome-cv-projects
|
928c48aa305d1cd0cd67412659a87ecc6fb6f8b0
|
[
"MIT"
] | null | null | null |
utils/pose_estimation.py
|
pedrocg42/awesome-cv-projects
|
928c48aa305d1cd0cd67412659a87ecc6fb6f8b0
|
[
"MIT"
] | null | null | null |
import cv2 as cv
import numpy as np
#########################################################
################## POSE ESTIMATION ######################
#########################################################
PART_NAMES = [
"nose",
"leftEye",
"rightEye",
"leftEar",
"rightEar",
"leftShoulder",
"rightShoulder",
"leftElbow",
"rightElbow",
"leftWrist",
"rightWrist",
"leftHip",
"rightHip",
"leftKnee",
"rightKnee",
"leftAnkle",
"rightAnkle",
]
NUM_KEYPOINTS = len(PART_NAMES)
PART_IDS = {pn: pid for pid, pn in enumerate(PART_NAMES)}
CONNECTED_PART_NAMES = [
("leftHip", "leftShoulder"),
("leftElbow", "leftShoulder"),
("leftElbow", "leftWrist"),
("leftHip", "leftKnee"),
("leftKnee", "leftAnkle"),
("rightHip", "rightShoulder"),
("rightElbow", "rightShoulder"),
("rightElbow", "rightWrist"),
("rightHip", "rightKnee"),
("rightKnee", "rightAnkle"),
("leftShoulder", "rightShoulder"),
("leftHip", "rightHip"),
]
CONNECTED_PART_INDICES = [(PART_IDS[a], PART_IDS[b]) for a, b in CONNECTED_PART_NAMES]
LOCAL_MAXIMUM_RADIUS = 1
POSE_CHAIN = [
("nose", "leftEye"),
("leftEye", "leftEar"),
("nose", "rightEye"),
("rightEye", "rightEar"),
("nose", "leftShoulder"),
("leftShoulder", "leftElbow"),
("leftElbow", "leftWrist"),
("leftShoulder", "leftHip"),
("leftHip", "leftKnee"),
("leftKnee", "leftAnkle"),
("nose", "rightShoulder"),
("rightShoulder", "rightElbow"),
("rightElbow", "rightWrist"),
("rightShoulder", "rightHip"),
("rightHip", "rightKnee"),
("rightKnee", "rightAnkle"),
]
PARENT_CHILD_TUPLES = [
(PART_IDS[parent], PART_IDS[child]) for parent, child in POSE_CHAIN
]
PART_CHANNELS = [
"left_face",
"right_face",
"right_upper_leg_front",
"right_lower_leg_back",
"right_upper_leg_back",
"left_lower_leg_front",
"left_upper_leg_front",
"left_upper_leg_back",
"left_lower_leg_back",
"right_feet",
"right_lower_leg_front",
"left_feet",
"torso_front",
"torso_back",
"right_upper_arm_front",
"right_upper_arm_back",
"right_lower_arm_back",
"left_lower_arm_front",
"left_upper_arm_front",
"left_upper_arm_back",
"left_lower_arm_back",
"right_hand",
"right_lower_arm_front",
"left_hand",
]
def draw_keypoints(
img,
instance_scores,
keypoint_scores,
keypoint_coords,
min_pose_confidence=0.5,
min_part_confidence=0.5,
):
cv_keypoints = []
for ii, score in enumerate(instance_scores):
if score < min_pose_confidence:
continue
for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
if ks < min_part_confidence:
continue
cv_keypoints.append(cv.KeyPoint(kc[1], kc[0], 10.0 * ks))
out_img = cv.drawKeypoints(img, cv_keypoints, outImage=np.array([]))
return out_img
def get_adjacent_keypoints(keypoint_scores, keypoint_coords, min_confidence=0.1):
results = []
for left, right in CONNECTED_PART_INDICES:
if (
keypoint_scores[left] < min_confidence
or keypoint_scores[right] < min_confidence
):
continue
results.append(
np.array(
[keypoint_coords[left][::-1], keypoint_coords[right][::-1]]
).astype(np.int32),
)
return results
def draw_skeleton(
img,
instance_scores,
keypoint_scores,
keypoint_coords,
min_pose_confidence=0.5,
min_part_confidence=0.5,
):
out_img = img
adjacent_keypoints = []
for ii, score in enumerate(instance_scores):
if score < min_pose_confidence:
continue
new_keypoints = get_adjacent_keypoints(
keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_confidence
)
adjacent_keypoints.extend(new_keypoints)
out_img = cv.polylines(
out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0)
)
return out_img
def draw_skel_and_kp(
img,
instance_scores,
keypoint_scores,
keypoint_coords,
min_pose_score=0.5,
min_part_score=0.5,
):
out_img = img
adjacent_keypoints = []
cv_keypoints = []
for ii, score in enumerate(instance_scores):
if score < min_pose_score:
continue
new_keypoints = get_adjacent_keypoints(
keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score
)
adjacent_keypoints.extend(new_keypoints)
for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
if ks < min_part_score:
continue
cv_keypoints.append(cv.KeyPoint(kc[1], kc[0], 10.0 * ks))
out_img = cv.drawKeypoints(
out_img,
cv_keypoints,
outImage=np.array([]),
color=(255, 255, 0),
flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
)
out_img = cv.polylines(
out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0)
)
return out_img
def unprocess_keypoint_coords(coords, image_shape, input_image_shape):
    # Note: each ratio algebraically reduces to image_shape[i], so this assumes the
    # incoming keypoint coordinates are normalized to [0, 1] before being rescaled.
    ratio_y = input_image_shape[0] * (image_shape[0] / input_image_shape[0])
    ratio_x = input_image_shape[1] * (image_shape[1] / input_image_shape[1])
coords[:, :, 0] *= ratio_y
coords[:, :, 1] *= ratio_x
return coords
| 26.0625
| 86
| 0.604317
|
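A short sketch of driving draw_skel_and_kp from pose_estimation.py with synthetic inputs; it assumes the module is importable as utils.pose_estimation (the path listed above) and that keypoint coordinates are (y, x) pixel pairs, one row per detected pose:

import numpy as np
from utils.pose_estimation import draw_skel_and_kp

img = np.zeros((480, 640, 3), dtype=np.uint8)        # blank frame
instance_scores = np.array([0.9])                     # one pose above threshold
keypoint_scores = np.full((1, 17), 0.8)               # all 17 keypoints confident
keypoint_coords = np.stack(                           # random (y, x) points inside the frame
    [np.random.uniform(0, 480, (1, 17)), np.random.uniform(0, 640, (1, 17))],
    axis=-1,
)

out = draw_skel_and_kp(img, instance_scores, keypoint_scores, keypoint_coords)
print(out.shape)                                      # (480, 640, 3)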
b58620490050d28e572f7a860894a765e5fb47ba
| 10,615
|
py
|
Python
|
aludel/database.py
|
praekelt/aludel
|
039ae119056efd52be75015d3877ac2f51ebedfe
|
[
"BSD-3-Clause"
] | 2
|
2015-03-24T20:23:09.000Z
|
2015-11-01T22:52:27.000Z
|
aludel/database.py
|
praekelt/aludel
|
039ae119056efd52be75015d3877ac2f51ebedfe
|
[
"BSD-3-Clause"
] | 1
|
2015-01-16T08:12:22.000Z
|
2015-01-16T08:12:22.000Z
|
aludel/database.py
|
praekelt/aludel
|
039ae119056efd52be75015d3877ac2f51ebedfe
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from alchimia import TWISTED_STRATEGY
from sqlalchemy import MetaData, Table, Column, String, Text, create_engine
from sqlalchemy.schema import CreateTable
from twisted.internet.defer import succeed
def get_engine(conn_str, reactor):
return create_engine(conn_str, reactor=reactor, strategy=TWISTED_STRATEGY)
class TableMissingError(Exception):
"""
Raised when a table does not exist in the database.
"""
class CollectionMissingError(Exception):
"""
Raised when no metadata was found for a :class:`TableCollection`.
"""
class make_table(object):
def __init__(self, *args, **kw):
self.args = args
self.kw = kw
def make_table(self, name, metadata):
return Table(name, metadata, *self.copy_args(), **self.kw)
def copy_args(self):
for arg in self.args:
if isinstance(arg, Column):
yield arg.copy()
else:
yield arg
def _false_to_error(result, err):
if not result:
raise err
return result
TABLE_EXISTS_ERR_TEMPLATES = (
# SQLite
'table %(name)s already exists',
'table "%(name)s" already exists',
# PostgreSQL
'relation %(name)s already exists',
'relation "%(name)s" already exists',
# MySQL
'Table %(name)s already exists',
"Table '%(name)s' already exists",
)
class _PrefixedTables(object):
def __init__(self, name, connection):
self.name = name
self._conn = connection
self._metadata = MetaData()
for attr in dir(self):
attrval = getattr(self, attr)
if isinstance(attrval, make_table):
setattr(self, attr, attrval.make_table(
self.get_table_name(attr), self._metadata))
def get_table_name(self, name):
raise NotImplementedError(
"_PrefixedTables should not be used directly.")
def _create_table(self, trx, table):
# This works around alchimia's current inability to create tables only
# if they don't already exist.
def table_exists_errback(f):
for err_template in TABLE_EXISTS_ERR_TEMPLATES:
# Sometimes the table name is lowercased.
for name in (table.name, table.name.lower()):
if err_template % {'name': name} in str(f.value):
return None
return f
d = self._conn.execute(CreateTable(table))
d.addErrback(table_exists_errback)
return d.addCallback(lambda r: trx)
def _create_tables(self):
d = self._conn.begin()
for table in self._metadata.sorted_tables:
d.addCallback(self._create_table, table)
return d.addCallback(lambda trx: trx.commit())
def exists(self):
raise NotImplementedError(
"_PrefixedTables should not be used directly.")
def _execute_query(self, query, *args, **kw):
return self._conn.execute(query, *args, **kw)
def execute_query(self, query, *args, **kw):
raise NotImplementedError(
"_PrefixedTables should not be used directly.")
def execute_fetchall(self, query, *args, **kw):
d = self.execute_query(query, *args, **kw)
return d.addCallback(lambda result: result.fetchall())
class CollectionMetadata(_PrefixedTables):
"""
Metadata manager for PrefixedTableCollection.
This tracks table prefixes and metadata for a given collection type.
"""
collection_metadata = make_table(
Column("name", String(255), primary_key=True),
Column("metadata_json", Text(), nullable=False),
)
_existence_cache_dict = None
@property
def _existence_cache(self):
if self._existence_cache_dict is None:
self._existence_cache_dict = {}
return self._existence_cache_dict
def get_table_name(self, name):
return '%s_%s' % (name, self.name)
def exists(self):
# It would be nice to make this not use private things.
return self._conn._engine.has_table(self.collection_metadata.name)
def create(self):
return self._create_tables()
def execute_query(self, query, *args, **kw):
d = self.exists()
d.addCallback(
_false_to_error, TableMissingError(self.collection_metadata.name))
d.addCallback(lambda _: self._execute_query(query, *args, **kw))
return d
def _update_existence_cache(self, new_metadata, clear=False):
cache = self._existence_cache
if clear:
cache.clear()
cache.update(dict((k, False if v is None else True)
for k, v in new_metadata.iteritems()))
# We return this so we can chain callbacks.
return new_metadata
def _rows_to_dict(self, rows):
metadata_dict = {}
for name, metadata_json in rows:
metadata_dict[name] = metadata_json
return metadata_dict
def _add_row_to_metadata(self, row, name):
metadata_json = None
if row is not None:
metadata_json = row.metadata_json
self._update_existence_cache({name: metadata_json})
return metadata_json
def _none_if_table_missing_eb(self, failure):
failure.trap(TableMissingError)
return None
def _decode_metadata(self, metadata_json, name):
if metadata_json is None:
raise CollectionMissingError(name)
return json.loads(metadata_json)
def _get_metadata(self, name):
d = self.execute_query(
self.collection_metadata.select().where(
self.collection_metadata.c.name == name))
d.addCallback(lambda result: result.fetchone())
d.addCallback(self._add_row_to_metadata, name)
return d
def get_metadata(self, name):
d = self._get_metadata(name)
d.addErrback(self._none_if_table_missing_eb)
d.addCallback(self._decode_metadata, name)
return d
def _decode_all_metadata(self, all_metadata):
metadata = {}
for name, metadata_json in all_metadata.iteritems():
if metadata_json is not None:
metadata[name] = json.loads(metadata_json)
return metadata
def get_all_metadata(self):
d = self.execute_fetchall(self.collection_metadata.select())
d.addCallback(self._rows_to_dict)
d.addCallback(self._update_existence_cache, clear=True)
d.addCallback(self._decode_all_metadata)
return d
def set_metadata(self, name, metadata):
metadata_json = json.dumps(metadata)
d = self.execute_query(
self.collection_metadata.update().where(
self.collection_metadata.c.name == name,
).values(metadata_json=metadata_json))
d.addCallback(lambda result: {name: metadata_json})
d.addCallback(self._update_existence_cache)
return d
def _create_collection(self, exists, name, metadata):
metadata_json = json.dumps(metadata)
if exists:
return
if exists is None:
d = self.create()
else:
d = succeed(None)
d.addCallback(lambda _: self.execute_query(
self.collection_metadata.insert().values(
name=name, metadata_json=metadata_json)))
d.addCallback(lambda result: {name: metadata_json})
d.addCallback(self._update_existence_cache)
return d
def create_collection(self, name, metadata=None):
"""
Create a metadata entry for the named collection.
:param str name: Name of the collection to check.
:param dict metadata:
Metadata value to store. If ``None``, an empty dict will be used.
If the metadata table does not exist, :meth:`CollectionMetadata.create`
will be called first.
"""
if metadata is None:
metadata = {}
d = self.collection_exists(name)
d.addCallback(self._create_collection, name, metadata)
return d
def collection_exists(self, name):
"""
Check for the existence of the named collection.
:param str name: Name of the collection to check.
If there is a metadata entry for ``name``, ``True`` is returned. If
there is no metadata entry, ``False`` is returned. If the metadata
table does not exist, ``None`` is returned. Both ``False`` and ``None``
are truthless values and the difference may be important to the caller.
:returns:
A :class:`Deferred` that fires with ``True``, ``False``, or
``None``.
"""
d = succeed(name)
if name not in self._existence_cache:
d.addCallback(self._get_metadata)
d.addCallback(lambda _: self._existence_cache[name])
d.addErrback(self._none_if_table_missing_eb)
return d
class TableCollection(_PrefixedTables):
"""
Collection of database tables sharing a common prefix.
Each table is prefixed with the collection type and name.
The collection type defaults to the class name, but the
:attr:`COLLECTION_TYPE` class attribute may be set to override this.
"""
COLLECTION_TYPE = None
def __init__(self, name, connection, collection_metadata=None):
super(TableCollection, self).__init__(name, connection)
if collection_metadata is None:
collection_metadata = CollectionMetadata(
self.collection_type(), connection)
self._collection_metadata = collection_metadata
@classmethod
def collection_type(cls):
ctype = cls.COLLECTION_TYPE
if ctype is None:
ctype = cls.__name__
return ctype
def get_table_name(self, name):
return '%s_%s_%s' % (self.collection_type(), self.name, name)
def exists(self):
return self._collection_metadata.collection_exists(self.name)
def create_tables(self, metadata=None):
d = self._create_tables()
d.addCallback(lambda _: self._collection_metadata.create_collection(
self.name, metadata))
return d
def get_metadata(self):
return self._collection_metadata.get_metadata(self.name)
def set_metadata(self, metadata):
return self._collection_metadata.set_metadata(self.name, metadata)
def execute_query(self, query, *args, **kw):
d = self.exists()
d.addCallback(_false_to_error, CollectionMissingError(self.name))
d.addCallback(lambda _: self._execute_query(query, *args, **kw))
return d
| 32.762346
| 79
| 0.640415
|
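The make_table descriptor in aludel/database.py copies its Column arguments into a fresh Table whose name is prefixed with the collection type and collection name. A minimal sketch of that mechanism, assuming the module above is importable as aludel.database; the connection is only stored at construction time, so None suffices for inspecting the generated table names without a database:

from sqlalchemy import Column, Integer, String
from aludel.database import TableCollection, make_table

class ReceiptTables(TableCollection):
    receipts = make_table(
        Column("id", Integer(), primary_key=True),
        Column("amount", String(255), nullable=False),
    )

coll = ReceiptTables("shop1", connection=None)
print(coll.receipts.name)          # ReceiptTables_shop1_receipts
print(coll.get_table_name("x"))    # ReceiptTables_shop1_x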
59f3776d2a8aadddc96203240c1d8a34c91aaddf
| 3,159
|
py
|
Python
|
huaweicloud-sdk-dms/huaweicloudsdkdms/v2/model/show_queue_project_tags_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-dms/huaweicloudsdkdms/v2/model/show_queue_project_tags_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-dms/huaweicloudsdkdms/v2/model/show_queue_project_tags_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowQueueProjectTagsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'tags': 'list[ShowProjectTagsRespTags]'
}
attribute_map = {
'tags': 'tags'
}
def __init__(self, tags=None):
"""ShowQueueProjectTagsResponse - a model defined in huaweicloud sdk"""
super(ShowQueueProjectTagsResponse, self).__init__()
self._tags = None
self.discriminator = None
if tags is not None:
self.tags = tags
@property
def tags(self):
"""Gets the tags of this ShowQueueProjectTagsResponse.
        List of tags.
:return: The tags of this ShowQueueProjectTagsResponse.
:rtype: list[ShowProjectTagsRespTags]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this ShowQueueProjectTagsResponse.
        List of tags.
:param tags: The tags of this ShowQueueProjectTagsResponse.
:type: list[ShowProjectTagsRespTags]
"""
self._tags = tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowQueueProjectTagsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.469565
| 79
| 0.563786
|
7a7ba285b86f80d9fd66c433384d12e7072296dc
| 5,895
|
py
|
Python
|
multiplayer_mode.py
|
kmranrg/ConnectFour
|
db882d80e3fa0fe9800b26301020b7a5d80545dd
|
[
"BSD-3-Clause"
] | null | null | null |
multiplayer_mode.py
|
kmranrg/ConnectFour
|
db882d80e3fa0fe9800b26301020b7a5d80545dd
|
[
"BSD-3-Clause"
] | null | null | null |
multiplayer_mode.py
|
kmranrg/ConnectFour
|
db882d80e3fa0fe9800b26301020b7a5d80545dd
|
[
"BSD-3-Clause"
] | null | null | null |
# Multi-Player Mode
import numpy as np
import pygame
import sys
import math
BLUE = (0,0,255)
DARK_BLUE = (0,0,40)
RED = (255,0,0)
YELLOW = (255,255,0)
ROW_COUNT = 6
COLUMN_COUNT = 7
def create_board():
board = np.zeros((ROW_COUNT,COLUMN_COUNT))
return board
def drop_piece(board, row, col, piece):
board[row][col] = piece
def is_valid_location(board, col):
return board[ROW_COUNT-1][col] == 0
def get_next_open_row(board, col):
for r in range(ROW_COUNT):
if board[r][col] == 0:
return r
def print_board(board):
print(np.flip(board, 0))
def winning_move(board, piece):
# Check horizontal locations for win
for c in range(COLUMN_COUNT-3):
for r in range(ROW_COUNT):
if board[r][c] == piece and board[r][c+1] == piece and board[r][c+2] == piece and board[r][c+3] == piece:
return True
# Check vertical locations for win
for c in range(COLUMN_COUNT):
for r in range(ROW_COUNT-3):
if board[r][c] == piece and board[r+1][c] == piece and board[r+2][c] == piece and board[r+3][c] == piece:
return True
    # Check positively sloped diagonals
for c in range(COLUMN_COUNT-3):
for r in range(ROW_COUNT-3):
if board[r][c] == piece and board[r+1][c+1] == piece and board[r+2][c+2] == piece and board[r+3][c+3] == piece:
return True
    # Check negatively sloped diagonals
for c in range(COLUMN_COUNT-3):
for r in range(3, ROW_COUNT):
if board[r][c] == piece and board[r-1][c+1] == piece and board[r-2][c+2] == piece and board[r-3][c+3] == piece:
return True
def draw_board(board):
for c in range(COLUMN_COUNT):
for r in range(ROW_COUNT):
pygame.draw.rect(screen, BLUE, (c*SQUARESIZE, r*SQUARESIZE+SQUARESIZE, SQUARESIZE, SQUARESIZE))
pygame.draw.circle(screen, DARK_BLUE, (int(c*SQUARESIZE+SQUARESIZE/2), int(r*SQUARESIZE+SQUARESIZE+SQUARESIZE/2)), RADIUS)
for c in range(COLUMN_COUNT):
for r in range(ROW_COUNT):
if board[r][c] == 1:
pygame.draw.circle(screen, RED, (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)
elif board[r][c] == 2:
pygame.draw.circle(screen, YELLOW, (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)
pygame.display.update()
board = create_board()
print_board(board)
game_over = False
turn = 0
pygame.init()
SQUARESIZE = 100
width = COLUMN_COUNT * SQUARESIZE
height = (ROW_COUNT+1) * SQUARESIZE
size = (width, height)
RADIUS = int(SQUARESIZE/2 - 5)
screen = pygame.display.set_mode(size)
draw_board(board)
pygame.display.update()
myfont = pygame.font.SysFont("monospace", 75)
while not game_over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEMOTION:
pygame.draw.rect(screen, DARK_BLUE, (0,0, width, SQUARESIZE))
posx = event.pos[0]
if turn == 0:
pygame.draw.circle(screen, RED, (posx, int(SQUARESIZE/2)), RADIUS)
else:
pygame.draw.circle(screen, YELLOW, (posx, int(SQUARESIZE/2)), RADIUS)
pygame.display.update()
if event.type == pygame.MOUSEBUTTONDOWN:
pygame.draw.rect(screen, DARK_BLUE, (0,0, width, SQUARESIZE))
# Ask for Player 1 Input
if turn == 0:
posx = event.pos[0]
col = int(math.floor(posx/SQUARESIZE))
if is_valid_location(board, col):
row = get_next_open_row(board, col)
drop_piece(board, row, col, 1)
if winning_move(board, 1):
label = myfont.render("PLAYER 1 WINS!", 1, RED)
screen.blit(label, (40,10))
game_over = True
            # Ask for Player 2 Input
else:
posx = event.pos[0]
col = int(math.floor(posx/SQUARESIZE))
if is_valid_location(board, col):
row = get_next_open_row(board, col)
drop_piece(board, row, col, 2)
if winning_move(board, 2):
label = myfont.render("PLAYER 2 WINS!", 1, YELLOW)
screen.blit(label, (40,10))
game_over = True
print_board(board)
draw_board(board)
turn += 1
turn = turn % 2
if game_over:
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
break
| 37.788462
| 147
| 0.461917
|
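To sanity-check the sliding-window win detection used in winning_move() without starting the pygame loop, a small self-contained sketch of just the horizontal case (the vertical and diagonal checks follow the same pattern with shifted indices):

import numpy as np

ROW_COUNT, COLUMN_COUNT = 6, 7

def horizontal_win(board, piece):
    # Slide a window of four cells along every row, exactly as in winning_move().
    for r in range(ROW_COUNT):
        for c in range(COLUMN_COUNT - 3):
            if all(board[r][c + i] == piece for i in range(4)):
                return True
    return False

board = np.zeros((ROW_COUNT, COLUMN_COUNT))
board[0, 2:6] = 1                       # four player-1 pieces in a row
print(horizontal_win(board, 1))         # True
print(horizontal_win(board, 2))         # False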
9353f2647c1b6d31d83ed6ed93e79e1277e24d26
| 21,121
|
py
|
Python
|
Src/Clova/vendor/requests/adapters.py
|
NishiYusuke/Line-boot-award
|
d77f26b9109f3cba45be5906bcb6c9314974cd92
|
[
"MIT"
] | null | null | null |
Src/Clova/vendor/requests/adapters.py
|
NishiYusuke/Line-boot-award
|
d77f26b9109f3cba45be5906bcb6c9314974cd92
|
[
"MIT"
] | null | null | null |
Src/Clova/vendor/requests/adapters.py
|
NishiYusuke/Line-boot-award
|
d77f26b9109f3cba45be5906bcb6c9314974cd92
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from urllib3.poolmanager import PoolManager, proxy_from_url
from urllib3.response import HTTPResponse
from urllib3.util import Timeout as TimeoutSauce
from urllib3.util.retry import Retry
from urllib3.exceptions import ClosedPoolError
from urllib3.exceptions import ConnectTimeoutError
from urllib3.exceptions import HTTPError as _HTTPError
from urllib3.exceptions import MaxRetryError
from urllib3.exceptions import NewConnectionError
from urllib3.exceptions import ProxyError as _ProxyError
from urllib3.exceptions import ProtocolError
from urllib3.exceptions import ReadTimeoutError
from urllib3.exceptions import SSLError as _SSLError
from urllib3.exceptions import ResponseError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth,
select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema)
from .auth import _basic_auth_str
try:
from urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
raise NotImplementedError
def close(self):
"""Cleans up adapter specific items."""
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc or not os.path.exists(cert_loc):
raise IOError("Could not find a suitable TLS CA certificate bundle, "
"invalid path: {0}".format(cert_loc))
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
conn.key_file = None
if conn.cert_file and not os.path.exists(conn.cert_file):
raise IOError("Could not find the TLS certificate file, "
"invalid path: {0}".format(conn.cert_file))
if conn.key_file and not os.path.exists(conn.key_file):
raise IOError("Could not find the TLS key file, "
"invalid path: {0}".format(conn.key_file))
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
elif isinstance(timeout, TimeoutSauce):
pass
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
# Receive the response from the server
try:
# For Python 2.7+ versions, use buffering of HTTP
# responses
r = low_conn.getresponse(buffering=True)
except TypeError:
# For compatibility with Python 2.6 versions and back
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
| 40.539347
| 98
| 0.600682
|
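The HTTPAdapter docstring above shows the basic mount pattern; a slightly fuller sketch wiring in urllib3's Retry class (the parameter values are illustrative, not recommendations):

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

retries = Retry(total=3, backoff_factor=0.5, status_forcelist=[502, 503, 504])
adapter = HTTPAdapter(pool_connections=5, pool_maxsize=10, max_retries=retries)

session = requests.Session()
session.mount("https://", adapter)
session.mount("http://", adapter)

# A (connect, read) timeout tuple as described in send()'s docstring:
# session.get("https://example.org", timeout=(3.05, 10))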
1d2294901b97c3c836434b248c91f9d8610a0c55
| 3,889
|
py
|
Python
|
Car_task_functions.py
|
AdoNunes/ASD_MNE_pipe
|
8da711dbd96f8308d9fcd5403b46a2903f67f6bd
|
[
"MIT"
] | null | null | null |
Car_task_functions.py
|
AdoNunes/ASD_MNE_pipe
|
8da711dbd96f8308d9fcd5403b46a2903f67f6bd
|
[
"MIT"
] | 1
|
2019-11-18T15:36:43.000Z
|
2019-11-18T15:36:43.000Z
|
Car_task_functions.py
|
AdoNunes/ASD_MNE_pipe
|
8da711dbd96f8308d9fcd5403b46a2903f67f6bd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 18:42:32 2019
@author: an512
Car task functions
"""
def get_epoch_times(epoch, PD_chn='UADC015-3007', trig_chn='UPPT001',
plot=False):
"""Get time start, response, out, end """
import numpy as np
epoch.pick_types(misc=True, meg=False)
fs = epoch.info['sfreq']
pik = epoch.ch_names.index(PD_chn)
n_trials = epoch.get_data().shape[0]
time_in = epoch.time_as_index(0)[0]
PD = epoch.get_data(pik)[:, 0, time_in:]
pik_tg = epoch.ch_names.index(trig_chn)
Tg = epoch.get_data(pik_tg)[:, 0, :]
resp_val = 1
end_trl_val = 99
beg_val = [10, 20, 30, 40]
PD_OFF = [None] * n_trials
time_beg = [None] * n_trials
time_end = [None] * n_trials
time_resp = [None] * n_trials
time_out = [None] * n_trials
for e in np.arange(n_trials):
# Photodiode
PD_OFF_temp = []
percentile = np.percentile(PD[e, :], [1, 99])
n, bins = np.histogram(PD[e, :], bins=50, range=percentile)
T_PD = PD[e, :] > bins[26] # set a treshold
        t_min = 0.16  # min PD length in seconds
        min_samp4 = round(t_min * fs/4)  # quarter of min PD length, in samples
min_samp8 = round(t_min * fs/8) # 1/8 PD min length
for ind, now in enumerate(T_PD):
if (now == False and T_PD[ind-1] == True and
np.all(T_PD[ind-min_samp8: ind-1] == True) and
np.all(T_PD[ind+min_samp8: ind+min_samp4] == False)):
PD_OFF_temp.append(ind)
PD_OFF[e] = PD_OFF_temp[-1]
# Response
res_sam = [i for i, j in enumerate(Tg[e, :]) if j == resp_val]
# Get one sample per response
res = np.array([j for i, j in enumerate(res_sam)
if i == 0 or j - 1 > res_sam[i - 1]])
resp_in = res[res > time_in]
        if resp_in.size != 1:
resp_in = [None]
time_resp[e] = resp_in
        # Time end and beginning
time_end_ix = np.where(Tg[e, :] == end_trl_val)
time_end_ix_last = time_end_ix[-1]
if time_end_ix_last.size > 0 and np.any(time_end_ix_last > time_in):
t = time_end_ix_last[-1]
elif time_resp[e] == [None]: # if no end trigger and no resp
t = PD_OFF[e] + time_in
else: # if no end trigger take either resp or PD off
t = max(time_resp[e][0], PD_OFF[e] + time_in)
time_end[e] = t
        X = [i for i in range(len(Tg[e, :])) if np.any(Tg[e, i] == beg_val)]
        if X == []:  # no begin trigger found; fall back to the first sample
            X = [0]
        time_beg[e] = X[0]
# Get first PD OFF and add 0 time samples
time_out = PD_OFF + time_in
assert np.all(time_end >= time_out)
if plot is True:
import matplotlib.pyplot as plt
plt.figure()
for d in range(n_trials):
rows = np.floor(np.sqrt(n_trials))
cols = np.ceil(np.sqrt(n_trials))
plt.subplot(rows, cols, 1+d)
b = time_beg[d]
e = time_end[d]
plt.plot(epoch.times[b:e], Tg[d, b:e].T)
plt.plot(epoch.times[b:e], epoch.get_data(pik)[d, 0, b:e].T)
plt.plot(epoch.times[time_resp[d]], [7], 'go', linewidth=5)
plt.plot(epoch.times[time_end[d]-1], [7], 'ro', linewidth=5)
plt.plot(epoch.times[time_beg[d]], [7], 'bo', linewidth=5)
plt.plot(epoch.times[time_out[d]], [7], 'ko', linewidth=5)
return time_beg, time_resp, time_out, time_end
'''
plt.figure()
plt.plot(epoch.times, Tg.T )
plt.plot(epoch.times[time_beg],[7]*len(time_beg), 'bo', linewidth=5)
plt.plot(epoch.times[time_out],[7]*len(time_out), 'ro', linewidth=5)
plt.plot(epoch.times[time_resp],[7]*len(time_resp), 'go', linewidth=5)
plt.plot(epoch.times[time_resp],epoch.times[time_out], 'go', linewidth=5)
import matplotlib.pyplot as plt
'''
| 35.678899
| 76
| 0.568269
|
6b0410e62e5ca9324ffcd55bcd16256c85444c14
| 1,690
|
py
|
Python
|
covidregister/register/migrations/0008_auto_20200525_1838.py
|
rattletat/covid-register.eu
|
fa9b5512e2a9d9ec4d1aafa577a84181085bb0ca
|
[
"MIT"
] | null | null | null |
covidregister/register/migrations/0008_auto_20200525_1838.py
|
rattletat/covid-register.eu
|
fa9b5512e2a9d9ec4d1aafa577a84181085bb0ca
|
[
"MIT"
] | null | null | null |
covidregister/register/migrations/0008_auto_20200525_1838.py
|
rattletat/covid-register.eu
|
fa9b5512e2a9d9ec4d1aafa577a84181085bb0ca
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-05-25 18:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('register', '0007_auto_20200525_1810'),
]
operations = [
migrations.RemoveField(
model_name='covidillness',
name='mild',
),
migrations.RemoveField(
model_name='covidillness',
name='moderate',
),
migrations.RemoveField(
model_name='covidillness',
name='severe',
),
migrations.RemoveField(
model_name='covidillness',
name='symptom_free',
),
migrations.AddField(
model_name='covidillness',
name='days_mild',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Days mild'),
),
migrations.AddField(
model_name='covidillness',
name='days_moderate',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Days moderate'),
),
migrations.AddField(
model_name='covidillness',
name='days_severe',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Days severe'),
),
migrations.AddField(
model_name='covidillness',
name='days_symptom_free',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Days symptom free'),
),
migrations.AddField(
model_name='medication',
name='end',
field=models.DateField(blank=True, null=True, verbose_name='End of medication'),
),
]
| 30.727273
| 96
| 0.577515
|
ac5fcdf8772d388e878d7e60ae3bbb5d40b83e83
| 1,524
|
py
|
Python
|
data/biodati/download_biodati.py
|
COVID-19-Causal-Reasoning/bel4corona
|
592da35133c4a6cc0e4605737fa5e6ea11586209
|
[
"MIT"
] | 3
|
2020-05-06T18:18:30.000Z
|
2020-05-19T04:56:48.000Z
|
data/biodati/download_biodati.py
|
COVID-19-Causal-Reasoning/bel4corona
|
592da35133c4a6cc0e4605737fa5e6ea11586209
|
[
"MIT"
] | null | null | null |
data/biodati/download_biodati.py
|
COVID-19-Causal-Reasoning/bel4corona
|
592da35133c4a6cc0e4605737fa5e6ea11586209
|
[
"MIT"
] | 1
|
2020-05-21T00:46:44.000Z
|
2020-05-21T00:46:44.000Z
|
# -*- coding: utf-8 -*-
"""Tools for acquiring and normalizing the content from BioDati's demo server."""
import json
import os
import click
import pybel.grounding
HERE = os.path.abspath(os.path.dirname(__file__))
NETWORK_ID = '01E46GDFQAGK5W8EFS9S9WMH12'
RAW_PATH = os.path.join(HERE, 'covid19-biodati-raw.bel.nodelink.json')
GROUNDED_PATH = os.path.join(HERE, 'covid19-biodati-grounded.bel.nodelink.json')
@click.command()
@click.option('--force', is_flag=True)
@click.option('--user', prompt=True)
@click.password_option()
def main(force: bool, user: str, password: str):
"""Download and dump the BioDati 'rona graph."""
    if force or not os.path.exists(GROUNDED_PATH):
        if force or not os.path.exists(RAW_PATH):
graph = pybel.from_biodati(
network_id=NETWORK_ID,
username='demo@biodati.com',
password='demo',
base_url='https://networkstore.demo.biodati.com',
)
pybel.dump(graph, RAW_PATH)
graph = pybel.load(RAW_PATH)
# This will probably not work for you (yet!)
graph = pybel.grounding.ground(graph)
graph.summarize()
pybel.dump(graph, GROUNDED_PATH)
else:
graph = pybel.load(GROUNDED_PATH)
res = pybel.to_bel_commons(
graph=graph,
host='https://bel.labs.coronawhy.org',
user=user,
password=password,
)
click.secho(json.dumps(res.json(), indent=2))
if __name__ == '__main__':
main()
| 27.709091
| 81
| 0.639764
|
fc584972b18aeb98fbdee5ee44b35cb43d5f058c
| 526
|
py
|
Python
|
src/ui/__init__.py
|
zsnake1209/SeExpr
|
5e09e1e84b5acb13c6402f2307d5375631f5f4c1
|
[
"Apache-2.0"
] | 294
|
2015-01-24T00:33:55.000Z
|
2022-03-12T14:29:28.000Z
|
src/SeExpr2/UI/__init__.py
|
cooljeanius/SeExpr
|
42b695546689c61ae87c7747fc27ae50653f09f5
|
[
"Apache-2.0"
] | 53
|
2015-07-07T06:07:35.000Z
|
2021-05-27T19:06:31.000Z
|
src/SeExpr2/UI/__init__.py
|
cooljeanius/SeExpr
|
42b695546689c61ae87c7747fc27ae50653f09f5
|
[
"Apache-2.0"
] | 82
|
2015-02-27T10:26:17.000Z
|
2022-03-27T19:28:25.000Z
|
##
# @file __init__.py
# @brief Package definition for expreditor api.
#
# <b>CONFIDENTIAL INFORMATION: This software is the confidential and
# proprietary information of Walt Disney Animation Studios ("WDAS").
# This software may not be used, disclosed, reproduced or distributed
# for any purpose without prior written authorization and license
# from WDAS. Reproduction of any section of this software must include
# this legend and all copyright notices.
# Copyright Disney Enterprises, Inc. All rights reserved.</b>
#
#
| 35.066667
| 70
| 0.777567
|
bb384eec133d5e7aa9139e1dbe80c45cb0b15e8d
| 3,781
|
py
|
Python
|
research/object_detection/ts_space/scripts/test_tf2_model.py
|
ts-deep-learning/models
|
8f837f6c57803338ce35494d7e84eb50e3a10111
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/ts_space/scripts/test_tf2_model.py
|
ts-deep-learning/models
|
8f837f6c57803338ce35494d7e84eb50e3a10111
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/ts_space/scripts/test_tf2_model.py
|
ts-deep-learning/models
|
8f837f6c57803338ce35494d7e84eb50e3a10111
|
[
"Apache-2.0"
] | null | null | null |
'''
Originally noted as working on branch r1.13.0.
This script needs to be placed in <path_to_tensorflow/models>/research/object_detection.
Given a TF2 SavedModel directory (-m), a dataset JSON (-j) and the data root (-dr), it saves
the detections and ground-truth boxes to a *_test_results.json file next to the model.
'''
import numpy as np
import os
import sys
from tqdm import tqdm
import tensorflow as tf
import json
import argparse
import time
import cv2
from pprint import pprint
parser = argparse.ArgumentParser(
description="Script to get test_report.json from object det api models")
parser.add_argument('-m', default="", help="PATH_TO_FROZEN_GRAPH")
parser.add_argument('-th', default=0.1, type=float, help="SCORE_THRESHOLD")
parser.add_argument('-dr', default="", type=str, help="Data root directory")
parser.add_argument('-j', default="", type=str, help="Dataset JSON file path")
args = parser.parse_args()
PATH_TO_SAVED_MODEL = args.m
SCORE_THRESHOLD = args.th
DATA_ROOT = args.dr
JSON_PATH = args.j
PATH_TO_RESULTS = os.path.dirname(args.m)
OUTPUT_IMAGES_DIR = os.path.join(PATH_TO_RESULTS, 'images')
if not os.path.exists(PATH_TO_RESULTS):
os.mkdir(PATH_TO_RESULTS)
if not os.path.exists(OUTPUT_IMAGES_DIR):
os.mkdir(OUTPUT_IMAGES_DIR)
def load_image_into_numpy_array(image_path):
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def run_inference(detect_fn):
det_boxes = []
det_scores = []
det_labels = []
true_boxes = []
# Open the JSON and get annotations
with open(JSON_PATH, 'r') as fp:
gt_data = json.load(fp)
annotations = gt_data['dataset'][0]['annotations']
print('Total number of images : ', len(annotations))
# for file_name in tqdm(file_names):
for annotation in tqdm(annotations):
image_path = annotation['image_path']
image_path = os.path.join(DATA_ROOT, image_path)
image_np = load_image_into_numpy_array(image_path)
input_tensor = tf.convert_to_tensor(image_np)
input_tensor = input_tensor[tf.newaxis, ...]
output_dict = detect_fn(input_tensor)
num_detections = int(output_dict.pop('num_detections'))
output_dict = {key: value[0, :num_detections].numpy()
for key, value in output_dict.items()}
output_dict['num_detections'] = num_detections
# detection_classes should be ints.
output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
# adding det boxes, labels and scores
index_to_keep = output_dict['detection_scores'] > SCORE_THRESHOLD
mod_det_scores = output_dict['detection_scores'][index_to_keep].tolist(
)
mod_det_boxes = []
height, width = image_np.shape[:2]
for bbox in (output_dict['detection_boxes'][index_to_keep]):
ymin, xmin, ymax, xmax = bbox
mod_det_boxes.append(
[int(xmin*width), int(ymin*height), int(xmax*width), int(ymax*height)])
det_boxes.append(mod_det_boxes)
det_scores.append(mod_det_scores)
det_labels.append(
output_dict['detection_classes'][index_to_keep].tolist())
gt_boxes = [bbox['box_coordinates'] for bbox in annotation['bbox_info']]
true_boxes.append(gt_boxes)
json_output = {
'det_boxes': det_boxes,
'det_labels': det_labels,
'det_scores': det_scores,
'true_boxes': true_boxes
}
filename = os.path.basename(
args.m) + '_' + str(time.time()) + '_test_results.json'
results_path = os.path.join(PATH_TO_RESULTS, filename)
with open(results_path, 'w') as fp:
json.dump(json_output, fp)
def main():
detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL)
run_inference(detect_fn)
if __name__ == '__main__':
main()
| 33.166667
| 108
| 0.688442
|
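The core coordinate handling in test_tf2_model.py is the conversion from the API's normalized [ymin, xmin, ymax, xmax] boxes to the pixel [xmin, ymin, xmax, ymax] layout stored in the results JSON; isolated here as a small sketch:

import numpy as np

def to_pixel_boxes(norm_boxes, height, width):
    """Convert normalized [ymin, xmin, ymax, xmax] rows to integer
    [xmin, ymin, xmax, ymax] pixel boxes, matching the script above."""
    out = []
    for ymin, xmin, ymax, xmax in norm_boxes:
        out.append([int(xmin * width), int(ymin * height),
                    int(xmax * width), int(ymax * height)])
    return out

boxes = np.array([[0.1, 0.2, 0.5, 0.6]])              # one detection, normalized
print(to_pixel_boxes(boxes, height=480, width=640))   # [[128, 48, 384, 240]]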
3fd2a0e6a883369fafefbd422bbdf203027c0d15
| 60
|
py
|
Python
|
tests/__init__.py
|
fosterrath-mila/deepgroove_user_exp
|
bec9ac7dc408d7bc24b1fde03e9723e7c13b5541
|
[
"BSD-3-Clause"
] | 2
|
2020-03-19T19:23:47.000Z
|
2020-03-20T08:45:23.000Z
|
tests/__init__.py
|
fosterrath-mila/deepgroove_user_exp
|
bec9ac7dc408d7bc24b1fde03e9723e7c13b5541
|
[
"BSD-3-Clause"
] | 7
|
2020-04-01T22:40:40.000Z
|
2020-05-01T15:53:00.000Z
|
tests/__init__.py
|
fosterrath-mila/deepgroove_user_exp
|
bec9ac7dc408d7bc24b1fde03e9723e7c13b5541
|
[
"BSD-3-Clause"
] | 2
|
2020-03-19T20:03:48.000Z
|
2020-04-24T02:36:20.000Z
|
"""Unit test package for deepgroove_web_user_experiment."""
| 30
| 59
| 0.8
|
ad42b96f1c72120557886da4d0fb649a96f4924e
| 2,151
|
py
|
Python
|
fhir/resources/tests/test_devicedefinition.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/tests/test_devicedefinition.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/tests/test_devicedefinition.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/DeviceDefinition
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import devicedefinition
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class DeviceDefinitionTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("DeviceDefinition", js["resourceType"])
return devicedefinition.DeviceDefinition(js)
def testDeviceDefinition1(self):
inst = self.instantiate_from("devicedefinition-example.json")
self.assertIsNotNone(inst, "Must have instantiated a DeviceDefinition instance")
self.implDeviceDefinition1(inst)
js = inst.as_json()
self.assertEqual("DeviceDefinition", js["resourceType"])
inst2 = devicedefinition.DeviceDefinition(js)
self.implDeviceDefinition1(inst2)
def implDeviceDefinition1(self, inst):
self.assertEqual(force_bytes(inst.id), force_bytes("example"))
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("0"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml"><p><b>Generated Narrative with Details</b></p><p><b>id</b>: example</p><p><b>identifier</b>: 0</p></div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
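# Hedged sketch (the fixture path below is an assumption; the tests resolve it via the
# FHIR_UNITTEST_DATADIR environment variable): the same load/serialize round-trip the test
# methods perform, written as a standalone helper outside unittest.
def roundtrip_devicedefinition(path="devicedefinition-example.json"):
    with io.open(path, "r", encoding="utf-8") as handle:
        js = json.load(handle)
    inst = devicedefinition.DeviceDefinition(js)          # parse the example resource
    return devicedefinition.DeviceDefinition(inst.as_json())  # re-parse its own serialization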
avg_line_length: 36.457627 | max_line_length: 164 | alphanum_fraction: 0.670851

hexsha: ef2a94f8adb3d4e4f1126410be588e0d6e9fae43 | size: 10,539 | ext: py | lang: Python
max_stars: imgur.py | CrushAndRun/sopel-Bodhi-extras | f1a616713fd71cb6873081b88d693cce63138d52 | ["EFL-1.0", "EFL-2.0"] | stars: null | null .. null
max_issues: imgur.py | CrushAndRun/sopel-Bodhi-extras | f1a616713fd71cb6873081b88d693cce63138d52 | ["EFL-1.0", "EFL-2.0"] | issues: null | null .. null
max_forks: imgur.py | CrushAndRun/sopel-Bodhi-extras | f1a616713fd71cb6873081b88d693cce63138d52 | ["EFL-1.0", "EFL-2.0"] | forks: null | null .. null
# -*- coding: utf8 -*-
"""
imgur.py - Sopel imgur Information Module
Copyright © 2014, iceTwy, <icetwy@icetwy.re>
Licensed under the Eiffel Forum License 2.
"""
import json
import re
import os.path
import sys
if sys.version_info.major < 3:
from urllib2 import HTTPError
from urlparse import urlparse
else:
    from urllib.error import HTTPError
from urllib.parse import urlparse
from sopel.config import ConfigurationError
from sopel import web, tools
from sopel.module import rule
class ImgurClient(object):
def __init__(self, client_id):
"""
Sets the client_id (obtain yours here: https://api.imgur.com/oauth2/addclient)
and the imgur API URL.
"""
self.client_id = client_id
self.api_url = "https://api.imgur.com/3/"
def request(self, input):
"""
Sends a request to the API. Only publicly available data is accessible.
Returns data as JSON.
"""
headers = {'Authorization': 'Client-ID ' + self.client_id,
'Accept': 'application/json'}
request = web.get(self.api_url + input, headers=headers)
#FIXME: raise for status
return json.loads(request)
def resource(self, resource, id):
"""
Retrieves a resource from the imgur API.
Returns data as JSON.
"""
api_request_path = '{0}/{1}'.format(resource, id)
return self.request(api_request_path)
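# Hedged usage sketch (the client ID string is a placeholder, not a real credential, and the
# image ID is taken from the docstring examples below): ImgurClient only needs a registered
# client ID to read publicly available resources.
def _example_client_usage():
    client = ImgurClient('your-client-id')      # hypothetical client ID
    data = client.resource('image', 'VlmfH')    # GET https://api.imgur.com/3/image/VlmfH
    return data['data']['views']                # parsed JSON, as returned by request()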
def configure(config):
"""
The client ID can be obtained by registering your bot at
https://api.imgur.com/oauth2/addclient
| [imgur] | example | purpose |
| --------- | --------------- | -------------------------------- |
| client_id | 1b3cfe15768ba29 | Bot's ID, for Imgur's reference. |
"""
if config.option('Configure Imgur? (You will need to register at https://api.imgur.com/oauth2/addclient)', False):
config.interactive_add('imgur', 'client_id', 'Client ID')
def setup(bot):
"""
Tests the validity of the client ID given in the configuration.
If it is not, initializes sopel's memory callbacks for imgur URLs,
and uses them as the trigger for the link parsing function.
"""
try:
client = ImgurClient(bot.config.imgur.client_id)
client.request('gallery.json')
except HTTPError:
raise ConfigurationError('Could not validate the client ID with Imgur. \
Are you sure you set it up correctly?')
    imgur_regex = re.compile(r'(?:https?://)?(?:i\.)?imgur\.com/(.*)$')
if not bot.memory.contains('url_callbacks'):
bot.memory['url_callbacks'] = tools.SopelMemory()
bot.memory['url_callbacks'][imgur_regex] = imgur
def album(link_id, bot):
"""
Handles information retrieval for non-gallery albums.
The bot will output the title, the number of images and the number of views
of the album.
"""
client = ImgurClient(bot.config.imgur.client_id)
api_response = client.resource('album', link_id)
album = api_response['data']
return bot.say('[imgur] [{0} - an album with {1} images and ' \
'{2} views]'.format(album['title'],
str(album['images_count']), \
str(album['views'])))
def gallery(link_id, bot):
"""
Handles information retrieval for gallery images and albums.
The bot will output the title, the type (image/album/gif), the number of
views, the number of upvotes/downvotes of the gallery resource.
"""
client = ImgurClient(bot.config.imgur.client_id)
api_response = client.resource('gallery', link_id)
gallery = api_response['data']
if gallery['is_album']:
return bot.say('[imgur] [{0} - a gallery album with {1} views ' \
'({2} ups and {3} downs)]'.format(gallery['title'], \
str(gallery['views']), \
str(gallery['ups']), \
str(gallery['downs'])))
    if gallery['animated']:
return bot.say('[imgur] [{0} - a gallery gif with {1} views ' \
'({2} ups and {3} downs)]'.format(gallery['title'], \
str(gallery['views']), \
str(gallery['ups']), \
str(gallery['downs'])))
else:
return bot.say('[imgur] [{0} - a gallery image with {1} views ' \
'({2} ups and {3} downs)]'.format(gallery['title'], \
str(gallery['views']),
str(gallery['ups']),
str(gallery['downs'])))
def user(username, bot):
"""
Handles information retrieval for user accounts.
The bot will output the name, and the numbers of submissions, comments and
liked resources, of the selected user.
"""
client = ImgurClient(bot.config.imgur.client_id)
api_response_account = client.resource('account', username)
api_response_gallery_profile = client.resource('account', username + '/gallery_profile')
account = api_response_account['data']
gallery_profile = api_response_gallery_profile['data']
return bot.say('[imgur] [{0} is an imgurian with {1} points of reputation, ' \
'{2} gallery submissions, {3} comments ' \
'and {4} likes]'.format(account['url'], \
str(account['reputation']), \
str(gallery_profile['total_gallery_submissions']), \
str(gallery_profile['total_gallery_comments']), \
str(gallery_profile['total_gallery_likes'])))
def image(link_id, bot):
"""
Handles information retrieval for non-gallery images.
The bot will output the title, the type (image/gif) and the number of views
of the selected image.
"""
client = ImgurClient(bot.config.imgur.client_id)
api_response = client.resource('image', link_id)
img = api_response['data']
    if img['title']:
        title = img['title']
    elif img['description']:
        title = img['description']
    else:
        title = 'untitled'
if img['animated']:
return bot.say('[imgur] [{0} - a gif with {1} views]'.format(title, \
str(img['views'])))
else:
return bot.say('[imgur] [{0} - an image with {1} views]'.format(title, \
str(img['views'])))
@rule(r'(?:https?://)?(?:i\.)?imgur\.com/(.*)$')
def imgur(bot, trigger):
"""
Parses the input URL and calls the appropriate function for the resource
(an image or an album).
imgur has two types of resources: non-gallery and gallery resources.
Non-gallery resources are images and albums that have not been uploaded
to the imgur gallery (imgur.com/gallery), whilst gallery resources have
been.
* imgur.com/id can refer to two distinct resources (i.e. a non-gallery image
and a gallery resource, e.g. imgur.com/VlmfH and imgur.com/gallery/VlmfH)
* i.imgur.com/id refers by default to the same non-gallery resource as
imgur.com/id, if there are two distinct resources for this ID.
It refers to the gallery resource if only the gallery resource exists.
* imgur.com/gallery/id refers solely to a gallery resource.
* imgur.com/a/id refers solely to an album. Non-gallery data is returned,
even if it is in the gallery.
* imgur.com/user/username refers solely to an imgur user account.
The regex rule above will capture either an ID to a gallery or non-gallery
image or album, or a path to a certain imgur resource (e.g. gallery/id,
user/username, and so forth).
It is more fool-proof to only demand gallery data from the imgur API
if we get a link that is of the form imgur.com/gallery/id, because
imgur IDs are not unique (see above) and we can trigger an error if
we request inexistant gallery data.
"""
#urlparse does not support URLs without a scheme.
#Add 'https' scheme to an URL if it has no scheme.
if not urlparse(trigger).scheme:
trigger = "https://" + trigger
"""Handle i.imgur.com links first.
They can link to non-gallery images, so we do not request gallery data,
but simply image data."""
if urlparse(trigger).netloc == 'i.imgur.com':
image_id = os.path.splitext(os.path.basename(urlparse(trigger).path))[0] # get the ID from the img
return image(image_id, bot)
"""Handle imgur.com/* links."""
#Get the path to the requested resource, from the URL (id, gallery/id, user/username, a/id)
resource_path = urlparse(trigger).path.lstrip('/')
#The following API endpoints require user authentication, which we do not support.
unauthorized = ['settings', 'notifications', 'message', 'stats']
if any(item in resource_path for item in unauthorized):
return bot.reply("[imgur] Unauthorized action.")
#Separate the URL path into an ordered list of the form ['gallery', 'id']
    resource_path_parts = list(filter(None, resource_path.split('/')))
    #Handle a simple link to imgur.com: no ID is given, so the above list is empty
if len(resource_path_parts) == 0:
return
#Handle a link with a path that has more than two components
if len(resource_path_parts) > 2:
return bot.reply("[imgur] Invalid link.")
#Handle a link to an ID: imgur.com/id
if len(resource_path_parts) == 1:
return image(resource_path_parts[0], bot)
#Handle a link to a gallery image/album: imgur.com/gallery/id
if resource_path_parts[0] == 'gallery':
return gallery(resource_path_parts[1], bot)
#Handle a link to an user account/profile: imgur.com/user/username
if resource_path_parts[0] == 'user':
return user(resource_path_parts[1], bot)
#Handle a link to an album: imgur.com/a/id
if resource_path_parts[0] == 'a':
return album(resource_path_parts[1], bot)
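# Hedged illustration of the routing rules documented in imgur()'s docstring (paths only, no
# network access): each example URL maps to the handler imgur() would dispatch to.
_EXAMPLE_ROUTES = {
    'https://imgur.com/VlmfH': 'image',            # bare ID -> non-gallery image
    'https://imgur.com/gallery/VlmfH': 'gallery',  # gallery image or album
    'https://imgur.com/a/VlmfH': 'album',          # non-gallery album
    'https://imgur.com/user/iceTwy': 'user',       # user account/profile
}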
avg_line_length: 43.016327 | max_line_length: 118 | alphanum_fraction: 0.594554

hexsha: 46e5395e173a96d8eef21a5945430b8c6977ec04 | size: 48,916 | ext: py | lang: Python
max_stars: test/test_locale_preferences.py | greenpau/py_insightvm_sdk | bd881f26e14cb9f0f9c47927469ec992de9de8e6 | ["Apache-2.0"] | stars: 2 | 2019-03-15T16:05:54.000Z .. 2020-07-19T18:37:50.000Z
max_issues: test/test_locale_preferences.py | greenpau/py_insightvm_sdk | bd881f26e14cb9f0f9c47927469ec992de9de8e6 | ["Apache-2.0"] | issues: 1 | 2021-03-26T04:46:12.000Z .. 2021-03-26T04:51:23.000Z
max_forks: test/test_locale_preferences.py | greenpau/py_insightvm_sdk | bd881f26e14cb9f0f9c47927469ec992de9de8e6 | ["Apache-2.0"] | forks: null | null .. null
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-like` ` not-like` | | `container-status` | `is` ` is-not` | | `containers` | `are` | | `criticality-tag` | `is` ` is-not` ` is-greater-than` ` is-less-than` ` is-applied` ` is-not-applied` | | `custom-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `cve` | `is` ` is-not` ` contains` ` does-not-contain` | | `cvss-access-complexity` | `is` ` is-not` | | `cvss-authentication-required` | `is` ` is-not` | | `cvss-access-vector` | `is` ` is-not` | | `cvss-availability-impact` | `is` ` is-not` | | `cvss-confidentiality-impact` | `is` ` is-not` | | `cvss-integrity-impact` | `is` ` is-not` | | `cvss-v3-confidentiality-impact` | `is` ` is-not` | | `cvss-v3-integrity-impact` | `is` ` is-not` | | `cvss-v3-availability-impact` | `is` ` is-not` | | `cvss-v3-attack-vector` | `is` ` is-not` | | `cvss-v3-attack-complexity` | `is` ` is-not` | | `cvss-v3-user-interaction` | `is` ` is-not` | | `cvss-v3-privileges-required` | `is` ` is-not` | | `host-name` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-empty` ` is-not-empty` ` is-like` ` not-like` | | `host-type` | `in` ` not-in` | | `ip-address` | `is` ` is-not` ` in-range` ` not-in-range` ` is-like` ` not-like` | | `ip-address-type` | `in` ` not-in` | | `last-scan-date` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `location-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `open-ports` | `is` ` is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is-not` ` in-range` ` greater-than` ` less-than` | | `service-name` | `contains` ` does-not-contain` | | `site-id` | `in` ` not-in` | | `software` | `contains` ` does-not-contain` | | `vAsset-cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | 
`vAsset-datacenter` | `is` ` is-not` | | `vAsset-host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `vAsset-power-state` | `in` ` not-in` | | `vAsset-resource-pool-path` | `contains` ` does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vulnerability-category` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` | | `vulnerability-cvss-v3-score` | `is` ` is-not` | | `vulnerability-cvss-score` | `is` ` is-not` ` in-range` ` is-greater-than` ` is-less-than` | | `vulnerability-exposures` | `includes` ` does-not-include` | | `vulnerability-title` | `contains` ` does-not-contain` ` is` ` is-not` ` starts-with` ` ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import py_insightvm_sdk
from py_insightvm_sdk.models.locale_preferences import LocalePreferences # noqa: E501
from py_insightvm_sdk.rest import ApiException
class TestLocalePreferences(unittest.TestCase):
"""LocalePreferences unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testLocalePreferences(self):
"""Test LocalePreferences"""
# FIXME: construct object with mandatory attributes with example values
# model = py_insightvm_sdk.models.locale_preferences.LocalePreferences() # noqa: E501
pass
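# Hedged illustration (not part of the generated stubs): the API description above defines a
# search-criteria filter as an object with "field", "operator", and value/lower/upper keys; a
# single filter for assets whose risk score falls in a range would look like the dict below
# (field and operator names are taken from the docstring's tables; the bounds are arbitrary).
EXAMPLE_SEARCH_FILTER = {
    "field": "risk-score",
    "operator": "in-range",
    "lower": 1000,
    "upper": 5000,
}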
if __name__ == '__main__':
unittest.main()
avg_line_length: 1,193.073171 | max_line_length: 48,045 | alphanum_fraction: 0.490637

hexsha: 029a0f498091dfe3890b883304ccfde423c7f359 | size: 5,595 | ext: py | lang: Python
max_stars: api/dialogflow_intents.py | damianomiotek/best_transport_Poland | 601c49402dc738dbdd63b811b4b313dbc5204791 | ["MIT"] | stars: null | null .. null
max_issues: api/dialogflow_intents.py | damianomiotek/best_transport_Poland | 601c49402dc738dbdd63b811b4b313dbc5204791 | ["MIT"] | issues: 2 | 2021-04-30T20:52:32.000Z .. 2021-06-10T21:55:38.000Z
max_forks: api/dialogflow_intents.py | damianomiotek/best_transport_Poland | 601c49402dc738dbdd63b811b4b313dbc5204791 | ["MIT"] | forks: null | null .. null
from api.others import *
from api.handling_pressing_item_of_dialogflow_list import *
intents = {
"Default Welcome Intent": default_welcome_intent,
"Others": others,
"Contact": contact,
"About us": about_us,
"Address": address,
"Car fleet": car_fleet,
"Account": account,
"Values of lists": values_of_lists,
"Access Google account": access_google_account,
"After sign in": after_sign_in,
"Delete account": delete_account,
"Delete account - yes": delete_account_yes,
"Delete account - no": delete_account_no,
"One user information": one_user_information,
"Tell username": tell_username,
"Tell name": tell_name,
"Tell surname": tell_surname,
"Tell email": tell_email,
"Tell phone number": tell_phone_number,
"Tell tax number": tell_tax_number,
"Tell place name": tell_place_name,
"Tell street": tell_street,
"Tell post code": tell_post_code,
"Tell post place": tell_post_place,
"Tell country": tell_country,
"Tell company tax number": tell_company_tax_number,
"Tell company email": tell_company_email,
"Tell company phone number": tell_company_phone_number,
"Tell company name": tell_company_name,
"Tell company street": tell_company_street,
"Tell company post code": tell_company_post_code,
"Tell company post place": tell_company_post_place,
"Tell company country": tell_company_country,
"Tell all account information": tell_all_account_information,
"Ask change account settings": ask_change_account_settings,
"Change personal data settings": change_personal_data_settings,
"Change personal settings - yes": change_personal_settings_yes,
"Change personal settings - no": change_personal_settings_no,
"Change company data settings": change_company_data_settings,
"Change company settings - yes": change_company_settings_yes,
"Change company settings - no": change_company_settings_no,
"Change one account information": change_one_account_information,
"Change user name": change_user_name,
"Change user name - custom": change_user_name_custom,
"Change user surname": change_user_surname,
"Change user surname - custom": change_user_surname_custom,
"Change user username": change_user_username,
"Change user username - custom": change_user_username_custom,
"Change user password": "",
"Change user password - custom": "",
"Change user email": "",
"Change user email - custom": "",
"Offers": offers,
"Inquiries": inquiries,
"Browse offers": browse_offers,
"Ask search offers": ask_search_offers,
"Search offers": search_offers,
"Ask search offer after id": ask_search_offer_id,
"Search offer id": search_offer_id,
"Ask until when offer valid": ask_until_when_offer_valid,
"Check offer valid": check_offer_valid,
"Check offer valid id": check_offer_valid_id,
"Create a inquiry": create_inquiry,
"Get inquiry title ask text": get_inquiry_title_ask_text,
"Get inquiry text": get_inquiry_text,
"Get email and phone number for inquiry": get_email_phone_for_inquiry,
"Search inquiry or response": search_inquiry_response,
"Ask search response": ask_search_response,
"Ask search inquiry": ask_search_inquiry,
"Search inquiry": search_inquiry,
"Search response": search_response,
"Ask creating inquiry for": ask_creating_inquiry_for,
"Ask search offer for creating inquiry": ask_search_offer_for_creating_inquiry,
"Ask search order for creating inquiry": ask_search_order_for_creating_inquiry,
"Get offers for inquiry": get_offers_for_inquiry,
"Are there unread responses": are_there_unread_responses,
"Unread responses": unread_responses,
"Ask responses for inquiry": ask_responses_for_inquiry,
"Responses for inquiry": responses_for_inquiry,
"Ask search response for": ask_search_response_for,
"Ask search response for offer": ask_search_response_for_offer,
"Search responses for offer": search_responses_for_offer,
"Ask create response to admin": ask_create_response_to_admnin,
"Create response to admin": create_response_to_admin,
"Orders": orders,
"Browse current orders": browse_current_orders,
"The history of orders": orders_history,
"Order number": order_number,
"Ask search orders": ask_search_orders,
"Search order": search_orders,
"Ask order accepted for execution": ask_order_accepted,
"Check order accepted for execution": check_order_accepted_for_execution,
"Ask order realised": ask_order_realised,
"Check order realised": check_order_realised,
"Ask remove order": ask_remove_order,
"Remove order": remove_order,
"Remove order - no": remove_order_no,
"Remove order - yes": remove_order_yes,
"Ask create order": ask_create_order,
"Create order loadings places dates": create_order_loadings_places_dates,
"Create order loadings dates": create_order_loadings_dates,
"Create order destinations places dates": create_order_destinations_places_dates,
"Create order destinations dates": create_order_destinations_dates,
"Create order destinations dates - yes": create_order_destinations_dates_yes,
"Create order destinations dates - no": create_order_destinations_dates_no,
"Create order additional notes": create_order_additional_notes,
"Create order price": create_order_price,
"Create order telephone email": create_order_email_phone_number,
"Create order": create_order,
"Create order - no": create_order_no,
"Create order - yes": create_order_yes
}
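# Hedged usage sketch (the dispatcher name and argument shape are assumptions, not part of
# this module): look up the handler registered for a Dialogflow intent display name and call
# it, falling back to the generic "Others" handler when the intent is unknown or its handler
# is still an empty placeholder string.
def dispatch_intent(intent_name, *args, **kwargs):
    handler = intents.get(intent_name) or intents["Others"]
    return handler(*args, **kwargs)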
avg_line_length: 47.820513 | max_line_length: 85 | alphanum_fraction: 0.745308

hexsha: c09c06cba2c409304cf8fc5cbac99d8a049fb545 | size: 18,143 | ext: py | lang: Python
max_stars: recipes/recipe_modules/gclient/config.py | panadrota/haiku-depot_tools | 53075227f31e38bf875dad98ec57d53076316d6f | ["BSD-3-Clause"] | stars: 1 | 2021-09-01T06:16:55.000Z .. 2021-09-01T06:16:55.000Z
max_issues: recipes/recipe_modules/gclient/config.py | panadrota/haiku-depot_tools | 53075227f31e38bf875dad98ec57d53076316d6f | ["BSD-3-Clause"] | issues: null | null .. null
max_forks: recipes/recipe_modules/gclient/config.py | panadrota/haiku-depot_tools | 53075227f31e38bf875dad98ec57d53076316d6f | ["BSD-3-Clause"] | forks: 4 | 2020-03-15T13:28:11.000Z .. 2021-12-13T08:57:39.000Z
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
try:
_STRING_TYPE = basestring
except NameError: # pragma: no cover
_STRING_TYPE = str
from recipe_engine.config import config_item_context, ConfigGroup, BadConf
from recipe_engine.config import ConfigList, Dict, Single, Static, Set, List
from . import api as gclient_api
def BaseConfig(USE_MIRROR=True, CACHE_DIR=None,
BUILDSPEC_VERSION=None, deps_file='.DEPS.git', **_kwargs):
cache_dir = str(CACHE_DIR) if CACHE_DIR else None
return ConfigGroup(
solutions = ConfigList(
lambda: ConfigGroup(
name = Single(_STRING_TYPE),
url = Single((_STRING_TYPE, type(None)), empty_val=''),
deps_file = Single(_STRING_TYPE, empty_val=deps_file, required=False,
hidden=False),
managed = Single(bool, empty_val=True, required=False, hidden=False),
custom_deps = Dict(value_type=(_STRING_TYPE, type(None))),
custom_vars = Dict(value_type=(_STRING_TYPE, bool)),
safesync_url = Single(_STRING_TYPE, required=False),
revision = Single(
(_STRING_TYPE, gclient_api.RevisionResolver),
required=False, hidden=True),
)
),
deps_os = Dict(value_type=_STRING_TYPE),
hooks = List(_STRING_TYPE),
target_os = Set(_STRING_TYPE),
target_os_only = Single(bool, empty_val=False, required=False),
target_cpu = Set(_STRING_TYPE),
target_cpu_only = Single(bool, empty_val=False, required=False),
cache_dir = Static(cache_dir, hidden=False),
# If supplied, use this as the source root (instead of the first solution's
# checkout).
src_root = Single(_STRING_TYPE, required=False, hidden=True),
# Maps 'solution' -> build_property
# TODO(machenbach): Deprecate this in favor of the one below.
# http://crbug.com/713356
got_revision_mapping = Dict(hidden=True),
# Maps build_property -> 'solution'
got_revision_reverse_mapping = Dict(hidden=True),
# Addition revisions we want to pass in. For now there's a duplication
# of code here of setting custom vars AND passing in --revision. We hope
# to remove custom vars later.
revisions = Dict(
value_type=(_STRING_TYPE, gclient_api.RevisionResolver),
hidden=True),
# TODO(iannucci): HACK! The use of None here to indicate that we apply this
# to the solution.revision field is really terrible. I mostly blame
# gclient.
# Maps 'parent_build_property' -> 'custom_var_name'
# Maps 'parent_build_property' -> None
# If value is None, the property value will be applied to
# solutions[0].revision. Otherwise, it will be applied to
# solutions[0].custom_vars['custom_var_name']
parent_got_revision_mapping = Dict(hidden=True),
delete_unversioned_trees = Single(bool, empty_val=True, required=False),
# Maps canonical repo URL to (local_path, revision).
# - canonical gitiles repo URL is "https://<host>/<project>"
# where project does not have "/a/" prefix or ".git" suffix.
# - solution/path is then used to apply patches as patch root in
# bot_update.
# - if revision is given, it's passed verbatim to bot_update for
# corresponding dependency. Otherwise (i.e. None), the patch will be
# applied on top of version pinned in DEPS.
# This is essentially a whitelist of which repos inside a solution
# can be patched automatically by bot_update based on
# api.buildbucket.build.input.gerrit_changes[0].project
# For example, if bare chromium solution has this entry in repo_path_map
# 'https://chromium.googlesource.com/angle/angle': (
# 'src/third_party/angle', 'HEAD')
# then a patch to Angle project can be applied to a chromium src's
# checkout after first updating Angle's repo to its main's HEAD.
repo_path_map = Dict(value_type=tuple, hidden=True),
# Check out refs/branch-heads.
# TODO (machenbach): Only implemented for bot_update atm.
with_branch_heads = Single(
bool,
empty_val=False,
required=False,
hidden=True),
# Check out refs/tags.
with_tags = Single(
bool,
empty_val=False,
required=False,
hidden=True),
disable_syntax_validation = Single(bool, empty_val=False, required=False),
USE_MIRROR = Static(bool(USE_MIRROR)),
    BUILDSPEC_VERSION = Static(BUILDSPEC_VERSION, hidden=True),
)
config_ctx = config_item_context(BaseConfig)
def ChromiumGitURL(_c, *pieces):
return '/'.join(('https://chromium.googlesource.com',) + pieces)
# TODO(phajdan.jr): Move to proper repo and add coverage.
def ChromeInternalGitURL(_c, *pieces): # pragma: no cover
return '/'.join(('https://chrome-internal.googlesource.com',) + pieces)
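# Illustrative note (not part of the original recipe module): these helpers
# simply join path pieces onto the host, e.g.
#   ChromiumGitURL(None, 'chromium', 'src')
#   # -> 'https://chromium.googlesource.com/chromium/src'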
@config_ctx()
def disable_syntax_validation(c):
c.disable_syntax_validation = True
@config_ctx()
def android(c):
c.target_os.add('android')
@config_ctx()
def nacl(c):
s = c.solutions.add()
s.name = 'native_client'
s.url = ChromiumGitURL(c, 'native_client', 'src', 'native_client.git')
m = c.got_revision_mapping
m['native_client'] = 'got_revision'
@config_ctx()
def webports(c):
s = c.solutions.add()
s.name = 'src'
s.url = ChromiumGitURL(c, 'webports.git')
m = c.got_revision_mapping
m['src'] = 'got_revision'
@config_ctx()
def emscripten_releases(c):
s = c.solutions.add()
s.name = 'emscripten-releases'
s.url = ChromiumGitURL(c, 'emscripten-releases.git')
m = c.got_revision_mapping
m['emscripten-releases'] = 'got_revision'
@config_ctx()
def gyp(c):
s = c.solutions.add()
s.name = 'gyp'
s.url = ChromiumGitURL(c, 'external', 'gyp.git')
m = c.got_revision_mapping
m['gyp'] = 'got_revision'
@config_ctx()
def build(c):
s = c.solutions.add()
s.name = 'build'
s.url = ChromiumGitURL(c, 'chromium', 'tools', 'build.git')
m = c.got_revision_mapping
m['build'] = 'got_revision'
@config_ctx()
def depot_tools(c): # pragma: no cover
s = c.solutions.add()
s.name = 'depot_tools'
s.url = ChromiumGitURL(c, 'chromium', 'tools', 'depot_tools.git')
m = c.got_revision_mapping
m['depot_tools'] = 'got_revision'
@config_ctx()
def skia(c): # pragma: no cover
s = c.solutions.add()
s.name = 'skia'
s.url = 'https://skia.googlesource.com/skia.git'
m = c.got_revision_mapping
m['skia'] = 'got_revision'
@config_ctx()
def skia_buildbot(c): # pragma: no cover
s = c.solutions.add()
s.name = 'skia_buildbot'
s.url = 'https://skia.googlesource.com/buildbot.git'
m = c.got_revision_mapping
m['skia_buildbot'] = 'got_revision'
@config_ctx()
def chrome_golo(c): # pragma: no cover
s = c.solutions.add()
s.name = 'chrome_golo'
s.url = 'https://chrome-internal.googlesource.com/chrome-golo/chrome-golo.git'
c.got_revision_mapping['chrome_golo'] = 'got_revision'
@config_ctx()
def infra_puppet(c): # pragma: no cover
s = c.solutions.add()
s.name = 'infra_puppet'
s.url = 'https://chrome-internal.googlesource.com/infra/puppet.git'
c.got_revision_mapping['infra_puppet'] = 'got_revision'
@config_ctx()
def build_internal(c):
s = c.solutions.add()
s.name = 'build_internal'
s.url = 'https://chrome-internal.googlesource.com/chrome/tools/build.git'
c.got_revision_mapping['build_internal'] = 'got_revision'
# We do not use 'includes' here, because we want build_internal to be the
# first solution in the list as run_presubmit computes upstream revision
# from the first solution.
build(c)
c.got_revision_mapping['build'] = 'got_build_revision'
@config_ctx()
def build_internal_scripts_slave(c):
s = c.solutions.add()
s.name = 'build_internal/scripts/slave'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build_limited/scripts/slave.git')
c.got_revision_mapping['build_internal/scripts/slave'] = 'got_revision'
# We do not use 'includes' here, because we want build_internal to be the
# first solution in the list as run_presubmit computes upstream revision
# from the first solution.
build(c)
c.got_revision_mapping['build'] = 'got_build_revision'
@config_ctx()
def master_deps(c):
s = c.solutions.add()
s.name = 'master.DEPS'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build/master.DEPS.git')
c.got_revision_mapping['master.DEPS'] = 'got_revision'
@config_ctx()
def slave_deps(c):
s = c.solutions.add()
s.name = 'slave.DEPS'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build/slave.DEPS.git')
c.got_revision_mapping['slave.DEPS'] = 'got_revision'
@config_ctx()
def internal_deps(c):
s = c.solutions.add()
s.name = 'internal.DEPS'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build/internal.DEPS.git')
c.got_revision_mapping['internal.DEPS'] = 'got_revision'
@config_ctx()
def pdfium(c):
soln = c.solutions.add()
soln.name = 'pdfium'
soln.url = 'https://pdfium.googlesource.com/pdfium.git'
m = c.got_revision_mapping
m['pdfium'] = 'got_revision'
@config_ctx()
def crashpad(c):
soln = c.solutions.add()
soln.name = 'crashpad'
soln.url = 'https://chromium.googlesource.com/crashpad/crashpad.git'
@config_ctx()
def boringssl(c):
soln = c.solutions.add()
soln.name = 'boringssl'
soln.url = 'https://boringssl.googlesource.com/boringssl.git'
soln.deps_file = 'util/bot/DEPS'
@config_ctx()
def dart(c):
soln = c.solutions.add()
soln.name = 'sdk'
soln.url = ('https://dart.googlesource.com/sdk.git')
soln.deps_file = 'DEPS'
soln.managed = False
@config_ctx()
def expect_tests(c):
soln = c.solutions.add()
soln.name = 'expect_tests'
soln.url = 'https://chromium.googlesource.com/infra/testing/expect_tests.git'
c.got_revision_mapping['expect_tests'] = 'got_revision'
@config_ctx()
def infra(c):
soln = c.solutions.add()
soln.name = 'infra'
soln.url = 'https://chromium.googlesource.com/infra/infra.git'
c.got_revision_mapping['infra'] = 'got_revision'
c.repo_path_map.update({
'https://chromium.googlesource.com/infra/luci/gae': (
'infra/go/src/go.chromium.org/gae', 'HEAD'),
'https://chromium.googlesource.com/infra/luci/luci-py': (
'infra/luci', 'HEAD'),
'https://chromium.googlesource.com/infra/luci/luci-go': (
'infra/go/src/go.chromium.org/luci', 'HEAD'),
'https://chromium.googlesource.com/infra/luci/recipes-py': (
'infra/recipes-py', 'HEAD')
})
@config_ctx()
def infra_internal(c): # pragma: no cover
soln = c.solutions.add()
soln.name = 'infra_internal'
soln.url = 'https://chrome-internal.googlesource.com/infra/infra_internal.git'
c.got_revision_mapping['infra_internal'] = 'got_revision'
@config_ctx(includes=['infra'])
def luci_gae(c):
# luci/gae is checked out as a part of infra.git solution at HEAD.
c.revisions['infra'] = 'refs/heads/main'
# luci/gae is developed together with luci-go, which should be at HEAD.
c.revisions['infra/go/src/go.chromium.org/luci'] = 'refs/heads/main'
c.revisions['infra/go/src/go.chromium.org/gae'] = (
gclient_api.RevisionFallbackChain('refs/heads/main'))
m = c.got_revision_mapping
del m['infra']
m['infra/go/src/go.chromium.org/gae'] = 'got_revision'
@config_ctx(includes=['infra'])
def luci_go(c):
# luci-go is checked out as a part of infra.git solution at HEAD.
c.revisions['infra'] = 'refs/heads/main'
c.revisions['infra/go/src/go.chromium.org/luci'] = (
gclient_api.RevisionFallbackChain('refs/heads/main'))
m = c.got_revision_mapping
del m['infra']
m['infra/go/src/go.chromium.org/luci'] = 'got_revision'
@config_ctx(includes=['infra'])
def luci_py(c):
# luci-py is checked out as part of infra just to have appengine
# pre-installed, as that's what luci-py PRESUBMIT relies on.
c.revisions['infra'] = 'refs/heads/main'
c.revisions['infra/luci'] = (
gclient_api.RevisionFallbackChain('refs/heads/main'))
m = c.got_revision_mapping
del m['infra']
m['infra/luci'] = 'got_revision'
@config_ctx(includes=['infra'])
def recipes_py(c):
c.revisions['infra'] = 'refs/heads/main'
c.revisions['infra/recipes-py'] = (
gclient_api.RevisionFallbackChain('refs/heads/main'))
m = c.got_revision_mapping
del m['infra']
m['infra/recipes-py'] = 'got_revision'
@config_ctx()
def recipes_py_bare(c):
soln = c.solutions.add()
soln.name = 'recipes-py'
soln.url = 'https://chromium.googlesource.com/infra/luci/recipes-py'
c.got_revision_mapping['recipes-py'] = 'got_revision'
@config_ctx()
def catapult(c):
soln = c.solutions.add()
soln.name = 'catapult'
soln.url = 'https://chromium.googlesource.com/catapult'
c.got_revision_mapping['catapult'] = 'got_revision'
@config_ctx(includes=['infra_internal'])
def infradata_master_manager(c):
soln = c.solutions.add()
soln.name = 'infra-data-master-manager'
soln.url = (
'https://chrome-internal.googlesource.com/infradata/master-manager.git')
del c.got_revision_mapping['infra_internal']
c.got_revision_mapping['infra-data-master-manager'] = 'got_revision'
@config_ctx()
def infradata_config(c):
soln = c.solutions.add()
soln.name = 'infra-data-config'
soln.url = 'https://chrome-internal.googlesource.com/infradata/config.git'
c.got_revision_mapping['infra-data-config'] = 'got_revision'
@config_ctx()
def infradata_rbe(c):
soln = c.solutions.add()
soln.name = 'infradata-rbe'
soln.url = 'https://chrome-internal.googlesource.com/infradata/rbe.git'
c.got_revision_mapping['infradata-rbe'] = 'got_revision'
@config_ctx()
def with_branch_heads(c):
c.with_branch_heads = True
@config_ctx()
def with_tags(c):
c.with_tags = True
@config_ctx()
def custom_tabs_client(c):
soln = c.solutions.add()
soln.name = 'custom_tabs_client'
# TODO(pasko): test custom-tabs-client within a full chromium checkout.
soln.url = 'https://chromium.googlesource.com/custom-tabs-client'
c.got_revision_mapping['custom_tabs_client'] = 'got_revision'
@config_ctx()
def gerrit_test_cq_normal(c):
soln = c.solutions.add()
soln.name = 'gerrit-test-cq-normal'
soln.url = 'https://chromium.googlesource.com/playground/gerrit-cq/normal.git'
@config_ctx()
def dawn(c):
soln = c.solutions.add()
soln.name = 'dawn'
soln.url = 'https://dawn.googlesource.com/dawn.git'
c.got_revision_mapping['dawn'] = 'got_revision'
@config_ctx()
def celab(c):
soln = c.solutions.add()
# soln.name must match the repo name for `dep` to work properly
soln.name = 'cel'
soln.url = 'https://chromium.googlesource.com/enterprise/cel.git'
c.got_revision_mapping['cel'] = 'got_revision'
@config_ctx()
def openscreen(c):
s = c.solutions.add()
s.name = 'openscreen'
s.url = 'https://chromium.googlesource.com/openscreen'
c.got_revision_mapping['openscreen'] = 'got_revision'
@config_ctx()
def devtools(c):
s = c.solutions.add()
s.name = 'devtools'
s.url = 'https://chromium.googlesource.com/devtools/devtools-frontend.git'
c.got_revision_mapping['devtools'] = 'got_revision'
c.repo_path_map.update({
'https://chromium.googlesource.com/devtools/devtools-frontend': (
'devtools/devtools-frontend', 'HEAD'),
})
@config_ctx()
def tint(c):
soln = c.solutions.add()
soln.name = 'tint'
soln.url = 'https://dawn.googlesource.com/tint.git'
c.got_revision_mapping['tint'] = 'got_revision'
@config_ctx()
def gerrit_plugins_binary_size(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_binary_size'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'binary-size.git')
c.got_revision_mapping['gerrit_plugins_binary_size'] = 'got_revision'
@config_ctx()
def gerrit_plugins_buildbucket(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_buildbucket'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'buildbucket.git')
c.got_revision_mapping['gerrit_plugins_buildbucket'] = 'got_revision'
@config_ctx()
def gerrit_plugins_chromium_behavior(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_chromium_behavior'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'chromium-behavior.git')
c.got_revision_mapping['gerrit_plugins_chromium_behavior'] = 'got_revision'
@config_ctx()
def gerrit_plugins_chromium_binary_size(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_chromium_binary_size'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins',
'chromium-binary-size.git')
c.got_revision_mapping['gerrit_plugins_chromium_binary_size'] = 'got_revision'
@config_ctx()
def gerrit_plugins_chromium_style(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_chromium_style'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'chromium-style.git')
  c.got_revision_mapping['gerrit_plugins_chromium_style'] = 'got_revision'
@config_ctx()
def gerrit_plugins_chumpdetector(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_chumpdetector'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'chumpdetector.git')
c.got_revision_mapping['gerrit_plugins_chumpdetector'] = 'got_revision'
@config_ctx()
def gerrit_plugins_code_coverage(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_code_coverage'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'code-coverage.git')
c.got_revision_mapping['gerrit_plugins_code_coverage'] = 'got_revision'
@config_ctx()
def gerrit_plugins_git_numberer(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_git_numberer'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'git-numberer.git')
c.got_revision_mapping['gerrit_plugins_git_numberer'] = 'got_revision'
@config_ctx()
def gerrit_plugins_landingwidget(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_landingwidget'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'landingwidget.git')
c.got_revision_mapping['gerrit_plugins_landingwidget'] = 'got_revision'
@config_ctx()
def gerrit_plugins_tricium(c):
s = c.solutions.add()
s.name = 'gerrit_plugins_tricium'
s.url = ChromiumGitURL(c, 'infra', 'gerrit-plugins', 'tricium.git')
c.got_revision_mapping['gerrit_plugins_tricium'] = 'got_revision'
| 34.361742
| 80
| 0.704128
|
6749c34011d9223212dcd57771e8a11c6b1316d2
| 3,115
|
py
|
Python
|
astropy/io/misc/asdf/tags/transform/tabular.py
|
ycopin/astropy
|
4c29ecd4239fc2e8f62780a9bbbeacec8e592461
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/io/misc/asdf/tags/transform/tabular.py
|
ycopin/astropy
|
4c29ecd4239fc2e8f62780a9bbbeacec8e592461
|
[
"BSD-3-Clause"
] | 1
|
2018-11-14T14:18:55.000Z
|
2020-01-21T10:36:05.000Z
|
astropy/io/misc/asdf/tags/transform/tabular.py
|
ycopin/astropy
|
4c29ecd4239fc2e8f62780a9bbbeacec8e592461
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf import yamlutil
from ...... import modeling
from ...... import units as u
from .basic import TransformType
from ......tests.helper import assert_quantity_allclose
__all__ = ['TabularType']
class TabularType(TransformType):
name = "transform/tabular"
version = '1.2.0'
types = [
modeling.models.Tabular2D, modeling.models.Tabular1D
]
@classmethod
def from_tree_transform(cls, node, ctx):
lookup_table = node.pop("lookup_table")
dim = lookup_table.ndim
fill_value = node.pop("fill_value", None)
if dim == 1:
# The copy is necessary because the array is memory mapped.
points = (node['points'][0][:],)
model = modeling.models.Tabular1D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
elif dim == 2:
points = tuple([p[:] for p in node['points']])
model = modeling.models.Tabular2D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
else:
            tabular_class = modeling.models.tabular_model(dim, node.get('name'))
points = tuple([p[:] for p in node['points']])
model = tabular_class(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
return model
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
node["fill_value"] = model.fill_value
node["lookup_table"] = model.lookup_table
node["points"] = [p for p in model.points]
node["method"] = str(model.method)
node["bounds_error"] = model.bounds_error
node["name"] = model.name
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
if isinstance(a.lookup_table, u.Quantity):
assert_quantity_allclose(a.lookup_table, b.lookup_table)
assert_quantity_allclose(a.points, b.points)
for i in range(len(a.bounding_box)):
assert_quantity_allclose(a.bounding_box[i], b.bounding_box[i])
else:
assert_array_equal(a.lookup_table, b.lookup_table)
assert_array_equal(a.points, b.points)
assert_array_equal(a.bounding_box, b.bounding_box)
assert (a.method == b.method)
if a.fill_value is None:
assert b.fill_value is None
elif np.isnan(a.fill_value):
assert np.isnan(b.fill_value)
else:
assert(a.fill_value == b.fill_value)
assert(a.bounds_error == b.bounds_error)
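# Illustrative sketch only (not part of the original tag module): the kind of
# model this tag serializes, assuming astropy.modeling is importable.
#
#   import numpy as np
#   from astropy.modeling import models
#   t = models.Tabular1D(points=np.arange(5), lookup_table=np.arange(5) * 2.0,
#                        bounds_error=False, fill_value=np.nan)
#   t(2.5)  # -> 5.0, linear interpolation between table entries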
| 38.9375
| 103
| 0.594543
|
beb2afce49762a656dd1e5c8a3cbf17a5130bb5f
| 3,109
|
py
|
Python
|
graph_ter_cls/transforms/global_rotate.py
|
RicardoLanJ/graph-ter
|
3b9bda527a6a9559be835c5b84e6491ac8c5aa30
|
[
"MIT"
] | 58
|
2020-03-24T16:06:21.000Z
|
2022-03-26T07:04:28.000Z
|
graph_ter_cls/transforms/global_rotate.py
|
RicardoLanJ/graph-ter
|
3b9bda527a6a9559be835c5b84e6491ac8c5aa30
|
[
"MIT"
] | 6
|
2020-04-02T08:52:37.000Z
|
2020-11-27T12:27:23.000Z
|
graph_ter_cls/transforms/global_rotate.py
|
RicardoLanJ/graph-ter
|
3b9bda527a6a9559be835c5b84e6491ac8c5aa30
|
[
"MIT"
] | 19
|
2020-03-29T18:23:55.000Z
|
2021-12-25T04:10:00.000Z
|
import json
import random
import numpy as np
from graph_ter_cls.transforms import utils
from graph_ter_cls.transforms.transformer import Transformer
class GlobalRotate(Transformer):
def __init__(self,
num_samples=256,
mode='isotropic', # isotropic or anisotropic
transform_range=(-np.pi / 36.0, np.pi / 36.0)):
super().__init__(out_features=3)
self.num_samples = num_samples
self.mode = mode
self.low, self.high = utils.get_range(transform_range)
@staticmethod
def _build_rotation_matrix(parameters):
theta_x = parameters[0]
theta_y = parameters[1]
theta_z = parameters[2]
matrix_x = np.eye(3)
matrix_x[1, 1] = np.cos(theta_x)
matrix_x[1, 2] = -np.sin(theta_x)
matrix_x[2, 1] = -matrix_x[1, 2]
matrix_x[2, 2] = matrix_x[1, 1]
matrix_y = np.eye(3)
matrix_y[0, 0] = np.cos(theta_y)
matrix_y[0, 2] = np.sin(theta_y)
matrix_y[2, 0] = -matrix_y[0, 2]
matrix_y[2, 2] = matrix_y[0, 0]
matrix_z = np.eye(3)
matrix_z[0, 0] = np.cos(theta_z)
matrix_z[0, 1] = -np.sin(theta_z)
matrix_z[1, 0] = -matrix_z[0, 1]
matrix_z[1, 1] = matrix_z[0, 0]
matrix = np.matmul(matrix_z, np.matmul(matrix_y, matrix_x))
return matrix
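    # Note: the combined matrix is R = Rz @ Ry @ Rx, so when applied as
    # np.dot(R, v) the X rotation acts first, then Y, then Z.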
def __call__(self, x):
num_points = x.shape[-1]
if self.mode.startswith('aniso'):
matrix = np.random.uniform(
low=self.low, high=self.high, size=(3, self.num_samples)
)
else:
matrix = np.random.uniform(
low=self.low, high=self.high, size=(3, 1)
)
matrix = np.repeat(matrix, self.num_samples, axis=1)
mask = np.sort(random.sample(range(num_points), self.num_samples))
y = x.copy()
if self.mode.startswith('aniso'):
for index, transform_id in enumerate(mask):
rotation_mat = self._build_rotation_matrix(matrix[:, index])
y[:, transform_id] = np.dot(rotation_mat, y[:, transform_id])
else:
rotation_mat = self._build_rotation_matrix(matrix[:, 0])
y[:, mask] = np.dot(rotation_mat, y[:, mask])
mask = np.repeat(np.expand_dims(mask, axis=0), 3, axis=0)
return y, matrix, mask
def __repr__(self):
info = self.get_config()
info_json = json.dumps(info, sort_keys=False, indent=2)
return info_json
def get_config(self):
result = {
'name': self.__class__.__name__,
'sampled points': self.num_samples,
'mode': self.mode,
'range': (self.low, self.high)
}
return result
def main():
x = np.array([[1, 2, 3, 4, 5, 6, 7],
[1, 2, 3, 4, 5, 6, 7],
[1, 2, 3, 4, 5, 6, 7]], dtype=float)
transform = GlobalRotate(num_samples=2, mode='isotropic')
y, m, mask = transform(x)
print(x)
print(y)
print(m)
print(mask)
if __name__ == '__main__':
main()
| 31.09
| 77
| 0.558701
|
75e5c37d11d1f76adf2f9925277e8340f54756be
| 272
|
py
|
Python
|
examples/datasets/__init__.py
|
RabbitWhite1/torchrec
|
031bcca5300d52099eb7490ff06fe0301c1c02f2
|
[
"BSD-3-Clause"
] | 814
|
2022-02-23T17:24:14.000Z
|
2022-03-31T16:52:23.000Z
|
examples/datasets/__init__.py
|
RabbitWhite1/torchrec
|
031bcca5300d52099eb7490ff06fe0301c1c02f2
|
[
"BSD-3-Clause"
] | 89
|
2022-02-23T17:29:56.000Z
|
2022-03-31T23:44:13.000Z
|
examples/datasets/__init__.py
|
RabbitWhite1/torchrec
|
031bcca5300d52099eb7490ff06fe0301c1c02f2
|
[
"BSD-3-Clause"
] | 68
|
2022-02-23T17:42:17.000Z
|
2022-03-28T06:39:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from . import criteo_dataframes # noqa
| 30.222222
| 71
| 0.753676
|
33879f20b7803525c1ca7b06a3f05d8145f6b246
| 2,281
|
py
|
Python
|
ship/monit.py
|
universitatjaumei/ship
|
469e35def23791131dbca2b2df64beac873b1149
|
[
"MIT"
] | null | null | null |
ship/monit.py
|
universitatjaumei/ship
|
469e35def23791131dbca2b2df64beac873b1149
|
[
"MIT"
] | 11
|
2015-09-01T07:26:43.000Z
|
2015-12-14T09:48:07.000Z
|
ship/monit.py
|
universitatjaumei/ship
|
469e35def23791131dbca2b2df64beac873b1149
|
[
"MIT"
] | null | null | null |
from logger import ShipLogger
from commands import *
from time import sleep
import os.path
import re
class Monit:
def __init__(self, home):
self.home = home
self.logger = ShipLogger()
def get_deploy_directory(self, service_name):
return os.path.join(self.home, service_name, service_name + ".jar")
def startup_service(self, module):
appname = module.get_name()
run("sudo /usr/bin/monit -c /etc/monit.conf start " + appname)
times = 1
while not self._running(appname) and times < 30:
sleep(10)
puts(".")
times = times + 1
self.logger.info("Trying to start the service ...")
if not self._running(appname):
error_message = "Can not complete the service '%s' restart" % appname
self.logger.error(error_message)
abort(error_message)
self.logger.info("Monit startup process completed")
def shutdown_service(self, module):
run("sudo /usr/bin/monit -c /etc/monit.conf stop " + module.get_name())
def deploy(self, module):
appname = module.get_name()
jarfile = "%s/target/%s.jar" % (module.get_directory(), appname)
self.logger.info("Copying JAR of module '" + appname + "' to remote host: %s" % self.get_deploy_directory(appname))
put(local_path=jarfile, remote_path=self.get_deploy_directory(appname))
def _running(self, name):
res = run("sudo /usr/bin/monit -c /etc/monit.conf status")
res = res.split("\r\n")
index = res.index("Process '%s'" % name)
status = res[index + 1].strip()
status = re.sub(' +', ' ', status)
return status.split()[1] == 'Running'
if __name__ == "__main__":
from environment import Environment
deploy_environment = Environment("development", "uji-ade-bd2storage")
set_environment(deploy_environment)
monit = Monit("/mnt/data/aplicacions/cron/")
print monit._running("uji-ade-bd2storage")
monit.shutdown_service("uji-ade-bd2storage")
monit.deploy(
"/opt/devel/workspaces/uji/uji-deployment-tools/deploy/target/ADE/uji-ade/uji-ade-bd2storage/target/uji-ade-bd2storage.jar", "uji-ade-bd2storage")
monit.startup_service("uji-ade-bd2storage")
| 34.560606
| 154
| 0.641824
|
8146ff86dd0440277fabc7f4221a19020f30e39e
| 2,513
|
py
|
Python
|
ivf/core/sfs/depth_from_gradient.py
|
tody411/ImageViewerFramework
|
5c183c34e65494b6af1287e70152b995a868c6ac
|
[
"MIT"
] | null | null | null |
ivf/core/sfs/depth_from_gradient.py
|
tody411/ImageViewerFramework
|
5c183c34e65494b6af1287e70152b995a868c6ac
|
[
"MIT"
] | null | null | null |
ivf/core/sfs/depth_from_gradient.py
|
tody411/ImageViewerFramework
|
5c183c34e65494b6af1287e70152b995a868c6ac
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
## @package ivf.core.sfs.depth_from_gradient
#
# ivf.core.sfs.depth_from_gradient utility package.
# @author tody
# @date 2016/02/08
import numpy as np
import scipy.sparse
import cv2
from ivf.core.sfs.constraints import l2Regularization, laplacianMatrix
from ivf.core.solver import amg_solver
def gradientConstraints(I_32F, w_cons=1.0):
epsilon = 0.01
gx = cv2.Sobel(I_32F, cv2.CV_32F, 1, 0, ksize=1)
gx = cv2.GaussianBlur(gx, (0, 0), 5.0)
#gx = smoothGradient(gx)
gxx = cv2.Sobel(gx, cv2.CV_32F, 1, 0, ksize=1)
gxx = gxx.flatten()
gy = cv2.Sobel(I_32F, cv2.CV_32F, 0, 1, ksize=1)
gy = cv2.GaussianBlur(gy, (0, 0), 5.0)
#gy = smoothGradient(gy)
gyy = cv2.Sobel(gy, cv2.CV_32F, 0, 1, ksize=1)
#gyy = cv2.bilateralFilter(gyy,15,20,10)
gyy = gyy.flatten()
h, w = I_32F.shape[:2]
num_verts = h * w
A = scipy.sparse.diags([-1, -1, -1, -1, 4],
[1, w, -1, -w, 0],
shape=(num_verts, num_verts))
#AtA = A.T * A
b = - 100.0 * (gxx + gyy)
#Atb = A.T * b
return w_cons * A, w_cons * b
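# Note on the construction above: A is a 5-point discrete Laplacian stencil and
# b is a scaled negative Laplacian of the smoothed intensity, so solving
# A @ D = b (as depthFromGradient does below) is a Poisson-integration step:
# the recovered depth has a Laplacian matching that of the shaded image, up to
# the L2 regularization term.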
def depthFromGradient(I_32F, A_8U):
if A_8U is not None:
I_32F = preProcess(I_32F, A_8U)
h, w = I_32F.shape[:2]
A_g, b_g = gradientConstraints(I_32F, w_cons=1.0)
A_rg = l2Regularization(h*w, w_rg=0.000001)
A_L = laplacianMatrix((h, w))
A = A_g + A_rg
b = b_g
D_flat = amg_solver.solve(A, b)
D_32F = D_flat.reshape(h, w)
return D_32F
def smoothGradient(gx):
h, w = gx.shape[:2]
A_L = laplacianMatrix((h, w))
w_g = 1.0
gx_flat = gx.flatten()
    cons_ids = np.where(gx_flat > 0.01 * np.max(gx_flat))
num_verts = h * w
diags = np.zeros(num_verts)
diags[cons_ids] = w_g
A = scipy.sparse.diags(diags, 0) + A_L
b = np.zeros(num_verts, dtype=np.float32)
b[cons_ids] = w_g * gx_flat[cons_ids]
gx_flat = amg_solver.solve(A, b)
gx = gx_flat.reshape(h, w)
print gx.shape
return np.float32(gx)
def preProcess(I0_32F, A_8U):
foreground = A_8U > 0.5 * np.max(A_8U)
background = A_8U == 0
I_32F = np.array(I0_32F)
I_32F[background] = np.min(I_32F)
I_min, I_max = np.min(I_32F), np.max(I_32F)
I_32F = (I_32F - I_min) / (I_max - I_min)
I_32F = cv2.bilateralFilter(I_32F,15,20,10)
sigma = 10.0
for i in xrange(10):
I_32F = cv2.GaussianBlur(I_32F, (0, 0), sigma)
I_32F[foreground] = I0_32F[foreground]
return I_32F
| 24.637255
| 70
| 0.598886
|
311bd4a24f8c95d8a9f4e9443ad5c65c34591e8d
| 802
|
py
|
Python
|
examples/connected-components-1.py
|
TiphaineV/streamfig
|
4acd92625c34bde0089b7963ec076d902d8ebba1
|
[
"MIT"
] | 5
|
2019-09-19T07:11:13.000Z
|
2021-12-13T11:18:41.000Z
|
examples/connected-components-1.py
|
TiphaineV/streamfig
|
4acd92625c34bde0089b7963ec076d902d8ebba1
|
[
"MIT"
] | 3
|
2020-04-23T17:37:23.000Z
|
2021-12-13T09:40:31.000Z
|
examples/connected-components-1.py
|
TiphaineV/streamfig
|
4acd92625c34bde0089b7963ec076d902d8ebba1
|
[
"MIT"
] | 5
|
2018-12-14T13:53:33.000Z
|
2020-05-18T17:22:52.000Z
|
from streamfig import *
s = StreamFig()
s.addColor("tBlue", "#AEC7E8") # 174,199,232
s.addColor("tOrange", "#FFBB78") # 255,187,120
s.addColor("tGreen", "#98DF8A") # 152,223,138
s.addColor("tPink", "#FF9896") # 255, 152,150
s.addColor("tRed", "#DD0000") # 255, 152,150
s.addNode("a")
s.addNode("b")
s.addNode("c")
s.addNode("d")
s.addNode("e")
s.addLink("a", "b", 0, 10)
s.addLink("b", "c", 0, 3)
s.addLink("a", "c", 2, 10, height=0.4)
s.addLink("c", "d", 5, 10)
s.addLink("d", "e", 0, 10)
s.addRectangle("a", "c", -0.1, 4.95, color=-1, bordercolor="tBlue", border="lrtb")
s.addRectangle("d", "e", -0.1, 4.95, color=-1, bordercolor="tBlue", border="lrtb")
s.addRectangle("a", "e", 5, 10.1, color=-1, bordercolor="tBlue", border="lrtb")
s.addTimeLine(ticks=2)
s.save("connected-components-1.fig")
| 27.655172
| 82
| 0.61596
|
9961c7dff626bec5de642c64abf46025518b3c92
| 436
|
py
|
Python
|
polyBlueClient.py
|
HenChao/PolyBlue
|
9e22b81bf8fce38fcb2807dd3f25eae2de6c8bb0
|
[
"Apache-2.0"
] | null | null | null |
polyBlueClient.py
|
HenChao/PolyBlue
|
9e22b81bf8fce38fcb2807dd3f25eae2de6c8bb0
|
[
"Apache-2.0"
] | null | null | null |
polyBlueClient.py
|
HenChao/PolyBlue
|
9e22b81bf8fce38fcb2807dd3f25eae2de6c8bb0
|
[
"Apache-2.0"
] | null | null | null |
from websocket import create_connection
class PolyBlueClient(object):
def __init__(self, app_name):
self.client = create_connection('ws://' + app_name + '.mybluemix.net/socket?Id=2')
def sendOutput(self, message):
self.client.send(message)
def getInput(self, message):
self.sendOutput(message)
result = None
while result is None:
result = self.client.recv()
return result
def close(self):
self.client.close()
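# Illustrative usage sketch (hypothetical app name, not part of the original
# module):
#   client = PolyBlueClient('my-app')
#   answer = client.getInput('What is your name?')
#   client.sendOutput('Hello, ' + answer)
#   client.close()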
| 21.8
| 84
| 0.724771
|
9615748efdbee3e88af5e24004bee88d91146276
| 12,222
|
py
|
Python
|
Tests/test_nonlocal.py
|
denim2x/ironpython3
|
45667291acb6e6d114ada31ced2a6bed7bc5b6a8
|
[
"Apache-2.0"
] | null | null | null |
Tests/test_nonlocal.py
|
denim2x/ironpython3
|
45667291acb6e6d114ada31ced2a6bed7bc5b6a8
|
[
"Apache-2.0"
] | null | null | null |
Tests/test_nonlocal.py
|
denim2x/ironpython3
|
45667291acb6e6d114ada31ced2a6bed7bc5b6a8
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import sys
import unittest
from iptest import IronPythonTestCase, run_test, is_cli, is_cpython
class SyntaxTests(IronPythonTestCase):
def check_compile_error(self, code, msg, lineno):
with self.assertRaises(SyntaxError) as cm:
compile(code, "<testcase>", "exec")
self.assertEqual(cm.exception.msg, msg)
self.assertEqual(cm.exception.lineno, lineno)
def test_no_binding_func(self):
source = """if True:
def foo():
nonlocal x
return x
f()
"""
self.check_compile_error(source, "no binding for nonlocal 'x' found", 3)
def test_no_binding_func_global(self):
source = """if True:
x = 1
def foo():
nonlocal x
return x
f()
"""
self.check_compile_error(source, "no binding for nonlocal 'x' found", 4)
def test_no_binding_class(self):
source = """if True:
class Foo():
x = 1
class Bar():
nonlocal x
def f(self):
return x
"""
self.check_compile_error(source, "no binding for nonlocal 'x' found", 5)
def test_global_nonlocal(self):
source = """if True:
def foo():
global x # CPython's error location
nonlocal x # IronPython's error location
return x
f()
"""
self.check_compile_error(source, "name 'x' is nonlocal and global", 4 if is_cli else 3)
def test_nonlocal_global(self):
source = """if True:
def foo():
nonlocal x # CPython's error location
global x # IronPython's error location
return x
f()
"""
self.check_compile_error(source, "name 'x' is nonlocal and global", 4 if is_cli else 3)
def test_missing_nonlocal(self):
source = """if True:
def foo(x):
def bar():
nonlocal y
return x
return bar()
f(1)
"""
self.check_compile_error(source, "no binding for nonlocal 'y' found", 4)
@unittest.skipIf(is_cpython and sys.version_info <= (3,6), "CPython 3.4, 3.5 issues SyntaxWarning for this case")
def test_prior_assignment(self):
source = """if True:
def foo(x):
def bar():
x = 0
nonlocal x
return x
return bar()
f(1)
"""
self.check_compile_error(source, "name 'x' is assigned to before nonlocal declaration", 5)
@unittest.skipIf(is_cpython and sys.version_info <= (3,6), "CPython 3.4, 3.5 issues SyntaxWarning for this case")
def test_prior_use(self):
source = """if True:
def foo(x):
def bar():
y = x
nonlocal x
return x
return bar()
f(1)
"""
self.check_compile_error(source, "name 'x' is used prior to nonlocal declaration", 5)
@unittest.skipIf(is_cpython and sys.version_info <= (3,6), "CPython 3.4, 3.5 issues SyntaxWarning for this case")
def test_prior_assignment_and_use(self):
source = """if True:
def foo(x):
def bar():
class x: pass # assignment
x = 0 # assignment
x += 1 # use
nonlocal x
return x
return bar()
f(1)
"""
self.check_compile_error(source, "name 'x' is assigned to before nonlocal declaration", 7)
@unittest.skipIf(is_cpython and sys.version_info <= (3,6), "CPython 3.4, 3.5 issues SyntaxWarning for this case")
def test_prior_use_and_assignment(self):
source = """if True:
def foo(x):
def bar():
x += 1 # use
class x: pass # assignment
x = 0 # assignment
nonlocal x
return x
return bar()
f(1)
"""
self.check_compile_error(source, "name 'x' is assigned to before nonlocal declaration", 7)
@unittest.skipIf(is_cpython and sys.version_info <= (3,6), "CPython 3.4, 3.5 issues SyntaxWarning for this case")
def test_prior_call_and_assignment(self):
source = """if True:
def foo(x):
def bar():
x() # use
class x: pass # assignment
x = 0 # assignment
nonlocal x
return x
return bar()
f(int)
"""
if is_cli:
self.check_compile_error(source, "name 'x' is assigned to before nonlocal declaration", 7)
else:
self.check_compile_error(source, "name 'x' is used prior to nonlocal declaration", 7)
class FunctionalTests(IronPythonTestCase):
def test_local_scope_nonlocal(self):
# Test that nonlocal declarations are limited to a local scope (do not propagate to inner scopes)
def foo():
x = 1 # local in foo
def bar():
nonlocal x # from foo
x = 2
self.assertEqual(x, 2)
def gek():
# x is a readable reference to foo.<locals>.x
self.assertEqual(x, 2)
def lee():
nonlocal x # from foo
x = 3 # modifies foo.<locals>.x
self.assertEqual(x, 3)
def kao():
x = 4 # local in kao
self.assertEqual(x, 4)
kao()
self.assertEqual(x, 3) # not changed by kao
lee()
self.assertEqual(x, 3) # changed by lee
gek()
self.assertEqual(x, 3) # changed by lee
bar()
self.assertEqual(x, 3) # changed by lee
foo()
def test_nonlocal_del(self):
# Test that a nonlocal does not rebind to an unshadowed variable after del
def foo():
x1, x2 = 'foo:x1', 'foo:x2' # local in foo
x3 = 'foo:x3'
def bar():
with self.assertRaises(UnboundLocalError) as cm:
del x3 # x3 becomes local in bar but unassigned
self.assertEqual(cm.exception.args[0], "local variable 'x3' referenced before assignment")
with self.assertRaises(NameError) as cm:
dummy = x4 # x4 is local in foo but unassigned
self.assertEqual(cm.exception.args[0], "free variable 'x4' referenced before assignment in enclosing scope")
x1, x2 = 'bar:x1', 'bar:x2' # local in bar, shadowing foo
def gek():
nonlocal x1, x2, x3 # from bar
self.assertEqual(x1, 'bar:x1')
self.assertEqual(x2, 'bar:x2')
x1, x2 = 'gek:x1', 'gek:x2' # reassigned locals in bar
del x1 # deletes a local in bar
with self.assertRaises(NameError) as cm:
del x1 # x1 in bar is already deleted
self.assertEqual(cm.exception.args[0], "free variable 'x1' referenced before assignment in enclosing scope")
del x2 # deletes a local in bar
x2 = 'gek:x2+' # reassigns a variable in bar, bringing it back to life
with self.assertRaises(NameError) as cm:
dummy = x3 # x3 in bar is not yet assigned
self.assertEqual(cm.exception.args[0], "free variable 'x3' referenced before assignment in enclosing scope")
gek()
x3 = 'bar:x3' # finally x3 is assigned and declared local in bar
with self.assertRaises(UnboundLocalError) as cm:
dummy = x1 # x1 is already deleted by gek
self.assertEqual(cm.exception.args[0], "local variable 'x1' referenced before assignment")
with self.assertRaises(UnboundLocalError) as cm:
del x1 # x1 is already deleted by gek
self.assertEqual(cm.exception.args[0], "local variable 'x1' referenced before assignment")
self.assertEqual(x2, 'gek:x2+') # killed and resurrected by gek
bar()
self.assertEqual(x1, 'foo:x1') # unchanged
self.assertEqual(x2, 'foo:x2') # unchanged
self.assertEqual(x3, 'foo:x3') # unchanged
x4 = 'foo:x4' # made local in foo
foo()
def test_class_scope(self):
x = 'func'
class Foo():
x = 'class'
class Bar():
nonlocal x
def f(self):
return x
def get_bar(self):
return self.Bar()
self.assertEqual(Foo.Bar().f(), 'func')
self.assertEqual(Foo().get_bar().f(), 'func')
def test_nonlocal_class_del(self):
def foo():
class Bar():
def del_bar(self):
nonlocal Bar
del Bar
def get_bar(self):
return Bar()
bar = Bar()
return bar.get_bar, bar.del_bar
get_bar, del_bar = foo()
# str(get_bar()) produces something like
# <__main__.FunctionalTests.test_nonlocal_class_del.<locals>.foo.<locals>.Bar object at 0x000002426EEEE908>
self.assertEqual(str(get_bar())[1:].split()[0].split('.')[-1], 'Bar') # get_bar() works
self.assertEqual(del_bar(), None) # delete class Bar
with self.assertRaises(NameError) as cm:
get_bar() # cannot instantiate a nonexistent class
self.assertEqual(cm.exception.args[0], "free variable 'Bar' referenced before assignment in enclosing scope")
@unittest.skipIf(is_cli, "https://github.com/IronLanguages/ironpython3/issues/30")
def test_nonlocal_names(self):
def foo():
x = 'foo:x' # local in foo
def bar():
nonlocal x # from foo
class x(): # reassigns foo.<locals>.x to bar.<locals>.x
def f(self):
return x()
self.assertRegex(str(x().f()),
r"^<%s\.FunctionalTests\.test_nonlocal_names\.<locals>\.foo.<locals>.bar.<locals>.x object at 0x[0-9A-F]+>$" % __name__)
bar()
self.assertEqual(str(x),
"<class '%s.FunctionalTests.test_nonlocal_names.<locals>.foo.<locals>.bar.<locals>.x'>" % __name__)
bar_x = x
def gek():
nonlocal x, x # from foo
x = 'gek:x' # reassigns foo.<locals>.x to a local string
gek()
self.assertEqual(str(x), 'gek:x') # reasigned by gek
self.assertEqual(str(bar_x), # maintains bar.<locals>.x
"<class '%s.FunctionalTests.test_nonlocal_names.<locals>.foo.<locals>.bar.<locals>.x'>" % __name__)
# bar_x.f sees x from foo, not class x from bar
self.assertIsInstance(bar_x(), object)
self.assertRaises(TypeError, bar_x().f)
foo()
def test_nonlocal_import(self):
maxunicode = 0
some_number = 0
def foo():
nonlocal maxunicode, some_number
from sys import maxunicode
from sys import maxsize as some_number
foo()
self.assertEqual(maxunicode, 1114111)
self.assertGreaterEqual(some_number, 0x7FFFFFFF)
run_test(__name__)
| 39.553398
| 140
| 0.515218
|
67807db32690c16f62af77fbc55d2b9e7323b9a1
| 7,023
|
py
|
Python
|
test/dos_test.py
|
JefferyWangSH/dos-solver
|
4c1b8b1b5e94127e00edbb40bb0d5037c6f94ea9
|
[
"MIT"
] | null | null | null |
test/dos_test.py
|
JefferyWangSH/dos-solver
|
4c1b8b1b5e94127e00edbb40bb0d5037c6f94ea9
|
[
"MIT"
] | null | null | null |
test/dos_test.py
|
JefferyWangSH/dos-solver
|
4c1b8b1b5e94127e00edbb40bb0d5037c6f94ea9
|
[
"MIT"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
import sys
def calculate_dos_exact():
# generate grids for momentums of 2d
k_grids_1d = np.linspace(-k_cutoff, k_cutoff, k_num)
px_grids_2d = np.array([px for px in k_grids_1d for py in k_grids_1d])
py_grids_2d = np.array([py for px in k_grids_1d for py in k_grids_1d])
kx_grids_2d = np.array(np.mat(px_grids_2d).transpose())
ky_grids_2d = np.array(np.mat(py_grids_2d).transpose())
freq_grids = np.linspace(-freq_cutoff, freq_cutoff, freq_num)
# generate kernel (gaussian-type)
kernel = np.exp(-0.5*((kx_grids_2d+px_grids_2d)**2 + (ky_grids_2d+py_grids_2d)**2)*corr_length**2) \
* 2*np.pi * (static_gap * corr_length)**2 *(2*k_cutoff/k_num)**2
# generate feynman propagator
ek_grids = (kx_grids_2d**2+ky_grids_2d**2)/(2*mass) + fermi_surface
freq_grids_complex = freq_grids + infinitesimal_imag*1.0j
free_propagator_particle = (freq_grids_complex - ek_grids)**-1
free_propagator_hole = (freq_grids_complex + ek_grids)**-1
# calculate self energy correction and green's function
self_energy = kernel.dot(free_propagator_hole)
green_func = free_propagator_particle / (1 - free_propagator_particle * self_energy)
# allocate spectrum function
spectrum = -2 * np.imag(green_func)
    # compress to obtain density of state
dos_list = spectrum.sum(axis=0)/spectrum.shape[0]
# # check the memory
# print(sys.getsizeof(kernel))
# print(sys.getsizeof(free_propagator_hole))
# print(sys.getsizeof(free_propagator_particle))
# print(sys.getsizeof(self_energy))
# print(sys.getsizeof(green_func))
# print(sys.getsizeof(spectrum))
return freq_grids, dos_list
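# Note on the lines above: the propagator is resummed via the Dyson equation
# G = G0 / (1 - G0 * Sigma), the spectral function is A(k, w) = -2 Im G(k, w),
# and averaging A over the momentum grid yields the density of states N(w)
# plotted at the bottom of this script.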
def calculate_dos_approximate():
# generate grids for momentums of 2d
k_grids_1d = np.linspace(-k_cutoff, k_cutoff, k_num)
kx_grids_2d = np.array(np.mat([px for px in k_grids_1d for py in k_grids_1d]).transpose())
ky_grids_2d = np.array(np.mat([py for px in k_grids_1d for py in k_grids_1d]).transpose())
freq_grids = np.linspace(-freq_cutoff, freq_cutoff, freq_num)
# generate feynman propagators: for both particle and hole
ek_grids = (kx_grids_2d**2+ky_grids_2d**2)/(2*mass) + fermi_surface
freq_grids_complex = freq_grids + infinitesimal_imag*1.0j
free_propagator_particle = (freq_grids_complex - ek_grids)**-1
free_propagator_hole = (freq_grids_complex + ek_grids)**-1
# generate approximated self-energy
self_energy = 4 * np.pi**2 * static_gap**2 * free_propagator_hole \
* ( 1 - (free_propagator_hole - (ek_grids-fermi_surface)*2*free_propagator_hole**2) / (4*mass*corr_length**2) )
# compute green's function
green_function = free_propagator_particle / (1-free_propagator_particle*self_energy)
# allocate spectrum function
spectrum = -2 * np.imag(green_function)
    # compress to obtain density of state
dos_list = (spectrum).sum(axis=0)/spectrum.shape[0]
return freq_grids, dos_list
# # generate grids
# k_grids = np.linspace(0.0, k_cutoff, k_num)
# freq_grids = np.linspace(-freq_cutoff, freq_cutoff, freq_num)
# # generate feynman propagators: for both particle and hole
# freq_grids_complex = freq_grids + infinitesimal_imag*1.0j
# k_grids_trans = np.array(np.mat(k_grids).transpose())
# dispersion_trans = k_grids_trans**2/(2*mass) + fermi_surface
# free_propagator_particle = (freq_grids_complex - dispersion_trans)**-1
# free_propagator_hole = (freq_grids_complex + dispersion_trans)**-1
# # generate approximated self-energy
# self_energy = 2*np.pi*static_gap**2 * free_propagator_hole * (1 - free_propagator_hole/(2*mass*corr_length**2))
# # compute green's function
# green_function = free_propagator_particle / (1-free_propagator_particle*self_energy)
# # allocate spectrum function
# spectrum = -2 * np.imag(green_function)
    # # compress to obtain density of state
# # TODO: check it out
# dos_list = (k_grids_trans*spectrum).sum(axis=0)/spectrum.shape[0]
# return freq_grids, dos_list
def NumericalDeltaFunc(x, epsilon):
# poisson core
return epsilon/(epsilon**2+x**2)/np.pi
def calculate_dos_analytic(epsilon):
# generate grids for momentums of 2d
k_grids_1d = np.linspace(-k_cutoff, k_cutoff, k_num)
kx_grids_2d = np.array(np.mat([px for px in k_grids_1d for py in k_grids_1d]).transpose())
ky_grids_2d = np.array(np.mat([py for px in k_grids_1d for py in k_grids_1d]).transpose())
freq_grids = np.linspace(-freq_cutoff, freq_cutoff, freq_num)
ek_grids = (kx_grids_2d**2+ky_grids_2d**2)/(2*mass) + fermi_surface
eigen_energy = (ek_grids**2 + 4*np.pi * static_gap**2)**0.5
# mean-field results
spectrum = np.pi * (freq_grids + ek_grids)/eigen_energy \
* (NumericalDeltaFunc(freq_grids-eigen_energy, epsilon) - NumericalDeltaFunc(freq_grids+eigen_energy, epsilon))
    # add perturbed corrections
spectrum += (np.pi*static_gap/corr_length)**2/mass / (2*2**0.5*eigen_energy**3) \
* (NumericalDeltaFunc(freq_grids+eigen_energy,epsilon) - NumericalDeltaFunc(freq_grids-eigen_energy,epsilon))
# spectrum -= (np.pi*static_gap/corr_length)**2*(ek_grids-fermi_surface)*2**0.5/mass \
# * (3*freq_grids+2*ek_grids)/eigen_energy**3/(freq_grids+ek_grids)**2 \
# * (NumericalDeltaFunc(freq_grids-eigen_energy,epsilon) + NumericalDeltaFunc(freq_grids+eigen_energy,epsilon))
spectrum += (ek_grids-fermi_surface)*(static_gap*corr_length)**(-2)/4/mass * NumericalDeltaFunc(freq_grids+ek_grids,epsilon)
    # compress to obtain density of state
dos_list = (spectrum).sum(axis=0)/spectrum.shape[0]
return freq_grids, dos_list
if "__main__":
"""
    Take an approximate treatment of the self-energy and benchmark it against the exact results.
"""
# set up model params
freq_cutoff, k_cutoff = 8.0, 6.0
freq_num, k_num = int(1e3), 100
infinitesimal_imag = 0.2
# continuum model with dispersion relation E = p^2/(2m) - mu
mass = 1.0
fermi_surface = -7.0
static_gap = 0.1
corr_length = 30.0
# exact, relatively speaking, results from calculation
freq_exact, dos_exact = calculate_dos_exact()
# approximated results
freq_approx, dos_approx = calculate_dos_approximate()
# analytic approximated results
epsilon = infinitesimal_imag
freq_analytic, dos_analytic = calculate_dos_analytic(epsilon=epsilon)
# plot and comparison
plt.figure()
plt.grid(linestyle="-.")
plt.plot(freq_exact, dos_exact, label="Exact")
plt.plot(freq_approx, dos_approx, label="Approximate")
plt.plot(freq_analytic, dos_analytic, label="Analytic")
plt.ylim(bottom=0.0)
plt.xlabel("${\omega}$", fontsize=13)
plt.ylabel("${N(\omega)}$", fontsize=13)
plt.legend()
plt.tight_layout()
plt.savefig("./test/compare.pdf", dpi=200)
plt.show()
| 41.070175
| 149
| 0.699843
|
d1aadc27af44697f5ac073202b5fb8282331ba76
| 4,655
|
py
|
Python
|
pyctr/crypto/seeddb.py
|
Desterly/pyctr
|
710e96217b64dd4bac302d94261e73b8b1fd3b89
|
[
"MIT"
] | 18
|
2020-07-10T19:07:10.000Z
|
2022-03-24T06:09:14.000Z
|
pyctr/crypto/seeddb.py
|
Desterly/pyctr
|
710e96217b64dd4bac302d94261e73b8b1fd3b89
|
[
"MIT"
] | 20
|
2020-07-10T13:13:30.000Z
|
2022-02-19T21:04:23.000Z
|
pyctr/crypto/seeddb.py
|
Desterly/pyctr
|
710e96217b64dd4bac302d94261e73b8b1fd3b89
|
[
"MIT"
] | 7
|
2021-02-18T08:41:53.000Z
|
2022-03-24T06:09:20.000Z
|
# This file is a part of pyctr.
#
# Copyright (c) 2017-2021 Ian Burgwin
# This file is licensed under The MIT License (MIT).
# You can find the full license text in LICENSE in the root of this project.
from os import PathLike, environ
from os.path import join
from types import MappingProxyType
from typing import TYPE_CHECKING
from ..common import PyCTRError
from ..util import config_dirs, readle
if TYPE_CHECKING:
from typing import BinaryIO, Dict, Union
__all__ = ['SeedDBError', 'InvalidProgramIDError', 'InvalidSeedError', 'MissingSeedError', 'load_seeddb', 'get_seed',
'add_seed', 'get_all_seeds', 'save_seeddb']
SEED_ENTRY_PADDING = (b'\0' * 8)
class SeedDBError(PyCTRError):
"""Generic exception for seed operations."""
class InvalidProgramIDError(SeedDBError):
"""Program ID is not in a valid format."""
class InvalidSeedError(SeedDBError):
"""The provided seed is not in a valid format."""
class MissingSeedError(SeedDBError):
"""Seed not found in the database."""
_seeds: 'Dict[int, bytes]' = {}
_loaded_from_default_paths = False
def _load_seeds_from_file_object(fh: 'BinaryIO'):
seed_count = readle(fh.read(4))
fh.seek(0x10)
for _ in range(seed_count):
entry = fh.read(0x20)
title_id = readle(entry[0:8])
_seeds[title_id] = entry[0x8:0x18]
def _normalize_program_id(program_id: 'Union[int, str, bytes]') -> int:
if not isinstance(program_id, (int, str, bytes)):
raise InvalidProgramIDError('not an int, str, or bytes')
if isinstance(program_id, str):
program_id = int(program_id, 16)
elif isinstance(program_id, bytes):
program_id = int.from_bytes(program_id, 'little')
return program_id
def load_seeddb(fp: 'Union[PathLike, str, bytes, BinaryIO]' = None):
"""
Load a seeddb file.
:param fp: A file path or file-like object with the seeddb data.
"""
global _loaded_from_default_paths
if fp:
if isinstance(fp, (PathLike, str, bytes)):
fp = open(fp, 'rb')
_load_seeds_from_file_object(fp)
elif not _loaded_from_default_paths:
seeddb_paths = [join(x, 'seeddb.bin') for x in config_dirs]
try:
            # try to prepend the path from the SEEDDB_PATH environment variable
seeddb_paths.insert(0, environ['SEEDDB_PATH'])
except KeyError:
pass
for path in seeddb_paths:
try:
with open(path, 'rb') as fh:
_load_seeds_from_file_object(fh)
except FileNotFoundError:
pass
_loaded_from_default_paths = True
def get_seed(program_id: 'Union[int, str, bytes]', *, load_if_required: bool = True):
"""
Get a seed for a Program ID.
:param program_id: The Program ID to search for. If `bytes` is provided, the value must be little-endian.
:param load_if_required: Automatically load using :func:`load_seeddb` if the requested Program ID is not already
available.
"""
program_id = _normalize_program_id(program_id)
try:
return _seeds[program_id]
except KeyError:
if _loaded_from_default_paths or not load_if_required:
raise MissingSeedError(f'{program_id:016x}')
else:
if load_if_required:
load_seeddb()
return get_seed(program_id, load_if_required=False)
def add_seed(program_id: 'Union[int, str, bytes]', seed: 'Union[bytes, str]'):
"""
Adds a seed to the database.
:param program_id: The Program ID associated with the seed. If `bytes` is provided, the value must be little-endian.
:param seed: The seed to add.
"""
program_id = _normalize_program_id(program_id)
if isinstance(seed, str):
try:
seed = bytes.fromhex(seed)
except ValueError:
raise InvalidSeedError('seed is not in hex')
if len(seed) != 16:
raise InvalidSeedError(f'expected 16 bytes, got {len(seed)}')
_seeds[program_id] = seed
def get_all_seeds():
"""
Gets all the loaded seeds.
:return: A read-only view of the seed database.
"""
return MappingProxyType(_seeds)
def save_seeddb(fp: 'Union[PathLike, str, bytes, BinaryIO]'):
"""
Save the seed database to a seeddb file.
:param fp: A file path or file-like object to write the seeddb data to.
"""
if isinstance(fp, (PathLike, str, bytes)):
fp = open(fp, 'wb')
fp.write(len(_seeds).to_bytes(4, 'little') + (b'\0' * 12))
for program_id, seed in _seeds.items():
fp.write(program_id.to_bytes(8, 'little') + seed + SEED_ENTRY_PADDING)
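# Minimal usage sketch (illustrative; the program ID and import path below are
# assumptions, not taken from the original module):
#   from pyctr.crypto import seeddb
#   seeddb.add_seed(0x0004000000055D00, '00' * 16)
#   assert seeddb.get_seed(0x0004000000055D00) == b'\0' * 16
#   seeddb.save_seeddb('seeddb.bin')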
| 29.462025
| 120
| 0.656713
|
7f534ed51b463c66f34f2c61c396c26cbb43ba0b
| 8,227
|
py
|
Python
|
server/src/oscarbluelight/tests/offer/test_benefit_fixed_price.py
|
MaximBrewer/sebe
|
4b94b2c782d018b6fa3a130fa30173386cc9bfdd
|
[
"0BSD"
] | 8
|
2016-06-18T01:40:26.000Z
|
2021-02-08T04:08:58.000Z
|
server/src/oscarbluelight/tests/offer/test_benefit_fixed_price.py
|
MaximBrewer/sebe
|
4b94b2c782d018b6fa3a130fa30173386cc9bfdd
|
[
"0BSD"
] | 16
|
2018-05-04T13:00:07.000Z
|
2021-05-27T14:54:09.000Z
|
server/src/oscarbluelight/tests/offer/test_benefit_fixed_price.py
|
MaximBrewer/sebe
|
4b94b2c782d018b6fa3a130fa30173386cc9bfdd
|
[
"0BSD"
] | 3
|
2016-12-19T11:30:47.000Z
|
2019-10-27T20:30:15.000Z
|
from decimal import Decimal as D
from django.test import TestCase
from oscar.test import factories
from oscar.test.basket import add_product, add_products
from django_redis import get_redis_connection
from oscarbluelight.offer.models import (
Condition,
ConditionalOffer,
Range,
Benefit,
CompoundCondition,
BluelightCountCondition,
BluelightFixedPriceBenefit,
)
from .base import BaseTest
from unittest import mock
class TestAFixedPriceDiscountAppliedWithCountCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
range = Range.objects.create(name="All products", includes_all_products=True)
self.condition = BluelightCountCondition.objects.create(
range=range,
proxy_class="oscarbluelight.offer.conditions.BluelightCountCondition",
value=3,
)
self.benefit = BluelightFixedPriceBenefit.objects.create(
range=range,
proxy_class="oscarbluelight.offer.benefits.BluelightFixedPriceBenefit",
value=D("20.00"),
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_empty_basket(self):
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_is_worth_less_than_value(self):
add_product(self.basket, D("6.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(3, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_is_worth_the_same_as_value(self):
add_product(self.basket, D("5.00"), 4)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(4, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_is_more_than_value(self):
add_product(self.basket, D("8.00"), 4)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("12.00"), result.discount)
self.assertEqual(4, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_obeys_max_discount_setting(self):
self.benefit.max_discount = D("10.00")
self.benefit.save()
add_product(self.basket, D("8.00"), 4)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("10.00"), result.discount)
self.assertEqual(4, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_is_more_than_value_and_max_affected_items_set(
self,
):
self.benefit.max_affected_items = 3
self.benefit.save()
add_product(self.basket, D("8.00"), 4)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("4.00"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(1, self.basket.num_items_without_discount)
def test_rounding_error_for_multiple_products(self):
add_products(self.basket, [(D("7.00"), 1), (D("7.00"), 1), (D("7.00"), 1)])
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("1.00"), result.discount)
# Make sure discount together is the same as final discount
# Rounding error would return 0.99 instead 1.00
cumulative_discount = sum(
line.discount_value for line in self.basket.all_lines()
)
self.assertEqual(result.discount, cumulative_discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_records_reason_for_discount_no_voucher(self):
self.offer.name = "My Offer Name"
self.offer.description = "My Offer Description"
self.offer.get_voucher = mock.Mock()
self.offer.get_voucher.return_value = None
add_product(self.basket, D("5.00"), 5)
# Apply benefit twice to simulate how Applicator will actually do it
self.benefit.apply(self.basket, self.condition, self.offer)
self.benefit.apply(self.basket, self.condition, self.offer)
line = self.basket.all_lines()[0]
descrs = line.get_discount_descriptions()
self.assertEqual(len(descrs), 1)
self.assertEqual(descrs[0].amount, D("5.00"))
self.assertEqual(descrs[0].offer_name, "My Offer Name")
self.assertEqual(descrs[0].offer_description, "My Offer Description")
self.assertIsNone(descrs[0].voucher_name)
self.assertIsNone(descrs[0].voucher_code)
def test_records_reason_for_discount_with_voucher(self):
voucher = mock.Mock()
voucher.name = "My Voucher"
voucher.code = "SWEETDEAL"
self.offer.name = "Offer for Voucher"
self.offer.description = ""
self.offer.get_voucher = mock.Mock()
self.offer.get_voucher.return_value = voucher
add_product(self.basket, D("5.00"), 5)
# Apply benefit twice to simulate how Applicator will actually do it
self.benefit.apply(self.basket, self.condition, self.offer)
self.benefit.apply(self.basket, self.condition, self.offer)
line = self.basket.all_lines()[0]
descrs = line.get_discount_descriptions()
self.assertEqual(len(descrs), 1)
self.assertEqual(descrs[0].amount, D("5.00"))
self.assertEqual(descrs[0].offer_name, "Offer for Voucher")
self.assertEqual(descrs[0].offer_description, "")
self.assertEqual(descrs[0].voucher_name, "My Voucher")
self.assertEqual(descrs[0].voucher_code, "SWEETDEAL")
class FixedPriceBenefitCompoundConditionTest(BaseTest):
def test_apply_with_compound_condition(self):
basket = self._build_basket()
all_products = Range()
all_products.name = "site"
all_products.includes_all_products = True
all_products.save()
cond_a = Condition()
cond_a.proxy_class = "oscarbluelight.offer.conditions.BluelightValueCondition"
cond_a.value = 10
cond_a.range = all_products
cond_a.save()
cond_b = Condition()
cond_b.proxy_class = "oscarbluelight.offer.conditions.BluelightCountCondition"
cond_b.value = 2
cond_b.range = all_products
cond_b.save()
condition = CompoundCondition()
condition.proxy_class = "oscarbluelight.offer.conditions.CompoundCondition"
condition.conjunction = CompoundCondition.OR
condition.save()
condition.subconditions.set([cond_a, cond_b])
condition.save()
benefit = Benefit()
benefit.proxy_class = "oscarbluelight.offer.benefits.BluelightFixedPriceBenefit"
benefit.value = 0
benefit.range = all_products
benefit.max_affected_items = 3
benefit.save()
offer = ConditionalOffer()
offer.condition = condition
offer.benefit = benefit
offer.save()
line = basket.all_lines()[0]
self.assertEqual(line.quantity_with_discount, 0)
self.assertEqual(line.quantity_without_discount, 5)
discount = offer.apply_benefit(basket)
line = basket.all_lines()[0]
self.assertEqual(line.quantity_with_discount, 3)
self.assertEqual(line.quantity_without_discount, 2)
self.assertEqual(discount.discount, D("30.00"))
self.assertEqual(basket.total_excl_tax_excl_discounts, D("50.00"))
self.assertEqual(basket.total_excl_tax, D("20.00"))
| 41.97449
| 93
| 0.6881
|
1086459b677f61d0e5ac370020226a2bbd402118
| 2,305
|
py
|
Python
|
core/models/sql_traits/info_schema_sql.py
|
bopopescu/sdba
|
f1ecb71ebe627643fd296a07a3ca66b366cd37b9
|
[
"MIT"
] | 2
|
2020-08-11T13:55:40.000Z
|
2021-01-05T15:23:32.000Z
|
core/models/sql_traits/info_schema_sql.py
|
bopopescu/sdba
|
f1ecb71ebe627643fd296a07a3ca66b366cd37b9
|
[
"MIT"
] | null | null | null |
core/models/sql_traits/info_schema_sql.py
|
bopopescu/sdba
|
f1ecb71ebe627643fd296a07a3ca66b366cd37b9
|
[
"MIT"
] | 1
|
2020-07-24T07:26:46.000Z
|
2020-07-24T07:26:46.000Z
|
from abc import ABC
class InfoSchemaSql(ABC):
@property
def get_biggest_tables_sql(self) -> str:
"""
get_biggest_tables_sql
Returns:
str: plain SQL
"""
return "" \
"SELECT " \
"table_name AS `Table`, table_schema AS `Database`, round(((data_length + index_length) / 1024 / 1024 / 1024), 2) `seze_in_gb` " \
"FROM information_schema.TABLES " \
"ORDER BY seze_in_gb DESC " \
"LIMIT %s"
@property
def get_tables_without_pk_sql(self) -> str:
"""
get_tables_without_pk_sql
Returns:
str: plain SQL
"""
return "" \
"SELECT t.TABLE_SCHEMA, t.TABLE_NAME, t.TABLE_ROWS " \
"FROM information_schema.TABLES t " \
"LEFT JOIN " \
"information_schema.TABLE_CONSTRAINTS tc " \
"ON t.table_schema = tc.table_schema AND t.table_name = tc.table_name AND tc.constraint_type = 'PRIMARY KEY' " \
"WHERE tc.constraint_name IS NULL AND t.table_type = 'BASE TABLE' " \
"ORDER BY TABLE_ROWS DESC " \
"LIMIT %s"
@property
def get_size_per_engine_sql(self) -> str:
"""
get_size_per_engine_sql
Returns:
str: plain SQL
"""
return "" \
"SELECT ENGINE, COUNT(*) AS count_tables, SUM(DATA_LENGTH+INDEX_LENGTH) AS size, SUM(INDEX_LENGTH) AS index_size " \
"FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA NOT IN ('mysql', 'INFORMATION_SCHEMA','PERFORMANCE_SCHEMA') AND ENGINE IS NOT NULL " \
"GROUP BY ENGINE " \
"LIMIT %s"
| 32.464789
| 152
| 0.544035
|
77b317627693c98dfeee7b5562e0dd59b1c22868
| 16,632
|
py
|
Python
|
evewspace/Map/utils.py
|
darkorb/eve-wspace
|
687f3aeb6cd0a446a74108b2a5c03cb21c464497
|
[
"Apache-2.0"
] | null | null | null |
evewspace/Map/utils.py
|
darkorb/eve-wspace
|
687f3aeb6cd0a446a74108b2a5c03cb21c464497
|
[
"Apache-2.0"
] | null | null | null |
evewspace/Map/utils.py
|
darkorb/eve-wspace
|
687f3aeb6cd0a446a74108b2a5c03cb21c464497
|
[
"Apache-2.0"
] | null | null | null |
# Eve W-Space
# Copyright 2014 Andrew Austin and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from datetime import timedelta
from math import pow, sqrt
import datetime
import json
from core.utils import get_config
from django.conf import settings
from django.core.cache import cache
from django.db.models import Q
import pytz
class MapJSONGenerator(object):
"""Provides methods create a JSON representation of a Map.
Instantiated with a map and user.
Provides a method that returns the JSON representation of the map.
"""
def __init__(self, map, user):
self.map = map
self.user = user
self.pvp_threshold = int(get_config("MAP_PVP_THRESHOLD", user).value)
self.npc_threshold = int(get_config("MAP_NPC_THRESHOLD", user).value)
self.interest_time = int(get_config("MAP_INTEREST_TIME", user).value)
def _get_interest_path(self):
"""Get all MapSystems contained in a path to a system of interest."""
try:
return self._interest_path
except AttributeError:
threshold = (datetime.datetime.now(pytz.utc) -
timedelta(minutes=self.interest_time))
systems = []
for system in self.map.systems.filter(
interesttime__gt=threshold).iterator():
systems.extend(self.get_path_to_map_system(system))
self._interest_path = systems
return systems
@staticmethod
def get_cache_key(map_inst):
return '%s_map' % map_inst.pk
@staticmethod
def get_path_to_map_system(system):
"""
Returns a list of MapSystems on the route between the map root and
the provided MapSystem.
"""
systemlist = []
parent = system
while parent:
systemlist.append(parent)
if parent.parentsystem and not parent.parent_wormhole.collapsed:
parent = parent.parentsystem
else:
parent = None
return systemlist
def get_system_icon(self, system):
"""Get URL to system background icon.
Takes a MapSystem and returns the appropriate icon to
display on the map as a relative URL.
"""
pvp_threshold = self.pvp_threshold
npc_threshold = self.npc_threshold
static_prefix = "%s" % (settings.STATIC_URL + "images/")
if system.system.sysclass == 99:
return static_prefix + "scan.png"
if system.system.stfleets.filter(ended__isnull=True).exists():
return static_prefix + "farm.png"
if system.system.shipkills + system.system.podkills > pvp_threshold:
return static_prefix + "pvp.png"
if system.system.npckills > npc_threshold:
return static_prefix + "carebears.png"
# unscanned for >24h
if not system.system.signatures.filter(modified_time__gte=(datetime.datetime.now(pytz.utc)-datetime.timedelta(days=1))).filter(sigtype__isnull=False).exists():
return static_prefix + "scan.png"
# partially scanned
if system.system.signatures.filter(sigtype__isnull=True).exists():
return static_prefix + "isis_scan.png"
return None
def system_to_dict(self, system, level_x, level_y):
"""Get dict representation of a system.
Takes a MapSystem and X, Y data.
Returns the dict of information to be passed to the map JS as JSON.
"""
system_obj = system.system
is_wspace = system_obj.is_wspace()
system_dict = {
'sysID': system_obj.pk,
'Name': system_obj.name,
'LevelX': level_x,
'LevelY': level_y,
'SysClass': system_obj.sysclass,
'Friendly': system.friendlyname,
'interest':
system.interesttime and
system.interesttime > datetime.datetime.now(pytz.utc) -
timedelta(minutes=self.interest_time),
'interestpath': system in self._get_interest_path(),
'activePilots': len(system_obj.pilot_list),
'pilot_list': [x[1][1] for x in system_obj.pilot_list.items()
if x[1][1] != "OOG Browser"],
'iconImageURL': self.get_system_icon(system),
'msID': system.pk,
'backgroundImageURL': self.get_system_background(system),
'effect': system_obj.wsystem.effect if is_wspace else None,
'importance': system_obj.importance,
'shattered':
system_obj.wsystem.is_shattered if is_wspace else False,
}
if system.parentsystem:
parent_wh = system.parent_wormhole
system_dict.update({
'ParentID': system.parentsystem.pk,
'WhToParent': parent_wh.bottom_type.name,
'WhFromParent': parent_wh.top_type.name,
'WhMassStatus': parent_wh.mass_status,
'WhTimeStatus': parent_wh.time_status,
'WhTotalMass': parent_wh.max_mass,
'WhJumpMass': parent_wh.jump_mass,
'WhToParentBubbled': parent_wh.bottom_bubbled,
'WhFromParentBubbled': parent_wh.top_bubbled,
'whID': parent_wh.pk,
'collapsed': bool(parent_wh.collapsed),
})
else:
system_dict.update({
'ParentID': None,
'WhToParent': "",
'WhFromParent': "",
'WhTotalMass': None,
'WhJumpMass': None,
'WhMassStatus': None,
'WhTimeStatus': None,
'WhToParentBubbled': None,
'WhFromParentBubbled': None,
'whID': None,
'collapsed': False,
})
return system_dict
@staticmethod
def get_system_background(system):
"""
Takes a MapSystem and returns the appropriate background icon
as a relative URL or None.
"""
importance = system.system.importance
if importance == 0:
return None
elif importance == 1:
image = 'skull.png'
elif importance == 2:
image = 'mark.png'
else:
raise ValueError
return "{0}images/{1}".format(settings.STATIC_URL, image)
def get_systems_json(self):
"""Returns a JSON string representing the systems in a map."""
cache_key = self.get_cache_key(self.map)
systems = cache.get(cache_key)
if systems is None:
systems = self.create_syslist()
cache.set(cache_key, systems, 15)
user_locations_dict = cache.get('user_%s_locations' % self.user.pk)
if user_locations_dict:
user_img = "%s/images/mylocation.png" % (settings.STATIC_URL,)
user_locations = [i[1][0] for i in user_locations_dict.items()]
for system in systems:
if (system['sysID'] in user_locations and
system['iconImageURL'] is None):
system['iconImageURL'] = user_img
return json.dumps(systems, sort_keys=True)
def create_syslist(self):
"""
Return list of system dictionaries with appropriate x/y levels
for map display.
"""
# maps system ids to child/parent system ids
children = defaultdict(list)
# maps system ids to objects
systems = dict()
# maps system ids to priorities
priorities = dict()
for system in (self.map.systems.all()
.select_related('system', 'parentsystem',
'parent_wormhole')
.iterator()):
children[system.parentsystem_id].append(system.pk)
systems[system.pk] = system
priorities[system.pk] = system.display_order_priority
# sort children by priority
for l in children.values():
l.sort(key=priorities.__getitem__)
# actual map layout generation
layout_gen = LayoutGenerator(children)
system_positions = layout_gen.get_layout()
# generate list of system dictionaries for conversion to JSON
syslist = []
for sys_id in layout_gen.processed:
sys_obj = systems[sys_id]
x, y = system_positions[sys_id]
syslist.append(self.system_to_dict(sys_obj, x, y))
return syslist
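# Hedged usage note (illustrative): a caller such as a map view would typically
# build the JSON payload with something like
#
#     payload = MapJSONGenerator(map_inst, request.user).get_systems_json()
#
# where "map_inst" and "request" are assumed names for a Map instance and the
# current HTTP request; they are not defined in this module.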
def get_wormhole_type(system1, system2):
"""Gets the one-way wormhole types between system1 and system2."""
from Map.models import WormholeType
source = "K"
# Set the source and destination for system1 > system2
if system1.is_wspace:
source = str(system1.sysclass)
if system1.sysclass == 7:
source = "H"
if system1.sysclass in [8, 9, 10, 11]:
source = "NH"
destination = system2.sysclass
sourcewh = None
if source == "H":
if WormholeType.objects.filter(
source="H", destination=destination).count() == 0:
sourcewh = WormholeType.objects.filter(
source="K", destination=destination).all()
else:
sourcewh = WormholeType.objects.filter(
source="H", destination=destination).all()
if source == "NH":
if WormholeType.objects.filter(
source="NH", destination=destination).count() == 0:
sourcewh = WormholeType.objects.filter(
source="K", destination=destination).all()
else:
sourcewh = WormholeType.objects.filter(
source="NH", destination=destination).all()
if source == "5" or source == "6":
if WormholeType.objects.filter(
source="Z", destination=destination).count() != 0:
sourcewh = (WormholeType.objects
.filter(Q(source="Z") | Q(source='W'))
.filter(destination=destination).all())
if sourcewh is None:
sourcewh = (WormholeType.objects
.filter(Q(source=source) | Q(source='W'))
.filter(destination=destination).all())
return sourcewh
def get_possible_wh_types(system1, system2):
"""Takes two systems and gets the possible wormhole types between them.
For example, given system1 as highsec and system2 as C2, it should return
R943 and B274. system1 is the source and system2 is the destination.
Results are returned as lists because some combinations have
multiple possibilities.
Returns a dict in the format {system1: [R943,], system2: [B274,]}.
"""
# Get System1 > System2
forward = get_wormhole_type(system1, system2)
# Get Reverse
reverse = get_wormhole_type(system2, system1)
result = {'system1': forward, 'system2': reverse}
return result
def convert_signature_id(sigid):
"""Standardize the signature ID to XXX-XXX if info is available."""
escaped_sigid = sigid.replace(' ', '').replace('-', '').upper()
if len(escaped_sigid) == 6:
return "%s-%s" % (escaped_sigid[:3], escaped_sigid[3:])
else:
return sigid.upper()
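# Illustrative examples (not in the original source): six-character IDs are
# normalised, e.g. convert_signature_id("abc 123") returns "ABC-123", while
# anything else is only upper-cased, e.g. convert_signature_id("ab12") -> "AB12".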
class RouteFinder(object):
"""Provides methods for finding distances between systems.
Has methods for getting the shortest stargate jump route length,
the light-year distance, and the shortest stargate route
as a list of KSystem objects.
"""
def __init__(self):
from django.core.cache import cache
if not cache.get('route_graph'):
self._cache_graph()
else:
import cPickle
self.graph = cPickle.loads(cache.get('route_graph'))
@staticmethod
def _get_ly_distance(sys1, sys2):
"""
Gets the distance in light years between two systems.
"""
x1 = sys1.x
y1 = sys1.y
z1 = sys1.z
x2 = sys2.x
y2 = sys2.y
z2 = sys2.z
distance = sqrt(pow(x1 - x2, 2) +
pow(y1 - y2, 2) +
pow(z1 - z2, 2)) / 9.4605284e+15
return distance
def ly_distance(self, sys1, sys2):
return self._get_ly_distance(sys1, sys2)
def route_as_ids(self, sys1, sys2):
return self._find_route(sys1, sys2)
def route(self, sys1, sys2):
from Map.models import KSystem
return [KSystem.objects.get(pk=sysid)
for sysid in self._find_route(sys1, sys2)]
def route_length(self, sys1, sys2):
return len(self._find_route(sys1, sys2))
def _cache_graph(self):
from Map.models import KSystem
from core.models import SystemJump
from django.core.cache import cache
import cPickle
import networkx as nx
if not cache.get('route_graph'):
graph = nx.Graph()
for from_system in KSystem.objects.all():
for to_system in (SystemJump.objects
.filter(fromsystem=from_system.pk)):
graph.add_edge(from_system.pk, to_system.tosystem)
cache.set('route_graph',
cPickle.dumps(graph, cPickle.HIGHEST_PROTOCOL), 0)
self.graph = graph
def _find_route(self, sys1, sys2):
"""
Takes two system objects (can be KSystem or SystemData).
Returns a list of system IDs that comprise the route.
"""
import networkx as nx
import cPickle
if not self.graph:
from django.core.cache import cache
if not cache.get('route_graph'):
self._cache_graph()
self.graph = cPickle.loads(cache.get('route_graph'))
else:
self.graph = cPickle.loads(cache.get('route_graph'))
return nx.shortest_path(self.graph, source=sys1.pk, target=sys2.pk)
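# Hedged usage note (illustrative): once the jump graph is cached, callers can
# compare two KSystem objects, e.g.
#
#     finder = RouteFinder()
#     systems_on_route = finder.route_length(sys_a, sys_b)  # count of systems on the shortest path
#     light_years = finder.ly_distance(sys_a, sys_b)
#
# "sys_a" and "sys_b" are assumed objects exposing pk and x/y/z coordinates.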
class LayoutGenerator(object):
"""Provides methods for generating the map layout."""
def __init__(self, children):
"""Create new LayoutGenerator.
children should be a dictionary of system ids as
keys and their child ids as values.
"""
self.children = children
self.positions = None
self.occupied = [-1]
# after processing is done, this contains the processed
# system ids in drawing order
self.processed = []
def get_layout(self):
"""Create map layout.
returns a dictionary containing x, y positions for
the given system ids.
"""
if self.positions is not None:
return self.positions
self.positions = {}
root_node = self.children[None][0]
self._place_node(root_node, 0, 0)
return self.positions
def _place_node(self, node_id, x, min_y):
"""Determine x, y position for a node.
node_id: id of the node to be positioned
x: x position (depth) of the node
min_y: minimal y position of the node
(can't be above parent nodes)
returns: y offset relative to min_y
"""
self.processed.append(node_id)
# initially set y to the next free y in this column
# or min_y, whichever is greater
try:
y_occupied = self.occupied[x]
except IndexError:
self.occupied.append(-1)
y_occupied = -1
y = max(min_y, y_occupied + 1)
# position first child (and thus its children)
# and move this node down if child moved down
try:
first_child = self.children[node_id][0]
y += self._place_node(first_child, x + 1, y)
except IndexError:
pass # node has no children, ignore that.
# system position is now final, save it
self.occupied[x] = y
self.positions[node_id] = (x, y)
# place the rest of the children
for child in self.children[node_id][1:]:
self._place_node(child, x + 1, y)
return y - min_y
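# Hedged usage sketch (illustrative, not part of the original module):
# LayoutGenerator only needs the parent-id -> child-ids mapping, so it can be
# exercised in isolation. The ids below are arbitrary example values.
#
#     from collections import defaultdict
#     children = defaultdict(list)
#     children[None] = [1]   # map root
#     children[1] = [2, 3]   # two children hanging off the root
#     layout = LayoutGenerator(children).get_layout()
#     # layout == {1: (0, 0), 2: (1, 0), 3: (1, 1)}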
| 35.387234
| 167
| 0.597042
|
29e72a55f24dd4f76a08265fffd37a8ab54eba6e
| 21,121
|
py
|
Python
|
driver27/tests/test_views.py
|
SRJ9/driver27
|
3fd32005a89112fe9f94281b33a497d4ab1ebb55
|
[
"MIT"
] | 1
|
2016-10-12T22:49:09.000Z
|
2016-10-12T22:49:09.000Z
|
driver27/tests/test_views.py
|
SRJ9/driver27
|
3fd32005a89112fe9f94281b33a497d4ab1ebb55
|
[
"MIT"
] | 1
|
2016-10-12T22:47:08.000Z
|
2016-10-12T22:47:08.000Z
|
driver27/tests/test_views.py
|
SRJ9/driver27
|
3fd32005a89112fe9f94281b33a497d4ab1ebb55
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.contrib.admin.sites import AdminSite
from django.test import TestCase, Client, RequestFactory
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.forms.models import inlineformset_factory
from django.contrib.messages.storage.fallback import FallbackStorage
from ..models import Season, Driver, Team, Competition, Circuit, get_tuples_from_results
from ..models import GrandPrix, Race, Seat, ContenderSeason
from ..admin import SeasonAdmin, SeasonAdminForm, DriverAdmin, TeamAdmin
from ..admin import CompetitionAdmin, CircuitAdmin, GrandPrixAdmin
from ..admin import RaceAdmin, RelatedCompetitionAdmin
from ..admin import RaceInline, ResultInline, SeatInline
from ..admin.forms import RaceAdminForm
from ..admin.formsets import RaceFormSet
from ..admin.common import get_circuit_id_from_gp, GrandPrixWidget
from ..punctuation import get_punctuation_config
from rest_framework.test import APITestCase
from ..common import DRIVER27_NAMESPACE, DRIVER27_API_NAMESPACE
from django import forms
import json
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
def get_request():
request = MockRequest()
request.user = MockSuperUser()
return request
def get_fixtures_test():
# Django's test runner creates a fresh test DB (so fixtures are needed), while pytest uses no-commit transactions on the DB from the same settings
if hasattr(settings, 'PYTEST_SETTING') and settings.PYTEST_SETTING:
return None
else:
return ['driver27.json', ]
class FixturesTest(TestCase):
fixtures = get_fixtures_test()
class ViewTest(FixturesTest):
def setUp(self):
self.site = AdminSite()
self.client = Client()
self.factory = RequestFactory()
def _GET_request(self, reverse_url, kwargs=None, data=None, code=200, namespace=DRIVER27_NAMESPACE):
# Issue a GET request.
reverse_url = namespace+':'+reverse_url
the_reverse = reverse(reverse_url, kwargs=kwargs)
response = self.client.get(the_reverse, data=data, follow=True)
# Check that the final response status matches the expected code (200 by default).
self.assertEqual(response.status_code, code)
# def test_competition_list(self):
# self._GET_request('competition:list')
def _test_competition_view(self):
kwargs = {'competition_slug': 'f1'}
self._GET_request('competition:view', kwargs=kwargs)
self._GET_request('competition:driver-olympic', kwargs=kwargs)
self._GET_request('competition:driver-comeback', kwargs=kwargs)
self._GET_request('competition:driver-record-index', kwargs=kwargs)
self._GET_request('competition:team-olympic', kwargs=kwargs)
self._GET_request('competition:driver-seasons-rank', kwargs=kwargs)
self._GET_request('competition:team-seasons-rank', kwargs=kwargs)
self._GET_request('competition:driver-record-index', kwargs=kwargs)
self._GET_request('competition:view', kwargs={'competition_slug': 'f19'}, code=404)
def _test_season_view(self):
kwargs = {'competition_slug': 'f1', 'year': 2016}
self._GET_request('season:view', kwargs=kwargs)
self._GET_request('season:race-list', kwargs=kwargs)
race_kw = {'race_id': 1}
race_kw.update(kwargs)
self._GET_request('season:race-view', kwargs=race_kw)
self._GET_request('season:driver', kwargs=kwargs)
self._GET_request('season:driver-olympic', kwargs=kwargs)
self._GET_request('season:driver-comeback', kwargs=kwargs)
self._GET_request('season:driver-record-index', kwargs=kwargs)
self._GET_request('season:team', kwargs=kwargs)
self._GET_request('season:team-olympic', kwargs=kwargs)
self._GET_request('season:team-record-index', kwargs=kwargs)
self._GET_request('season:race-list', kwargs=kwargs)
kwargs = {'competition_slug': 'f19', 'year': 2006}
self._GET_request('season:view', kwargs=kwargs, code=404)
def test_ajax_standing(self, model='driver', default_code=200):
URL = 'dr27-ajax:standing'
data = {'model': model}
self._GET_request(URL, data=data, code=default_code)
data['competition_slug'] = 'f1'
self._GET_request(URL, data=data, code=default_code)
data['year'] = 2016
self._GET_request(URL, data=data, code=default_code)
data['olympic'] = True
self._GET_request(URL, data=data, code=default_code)
def test_ajax_standing_team(self):
self.test_ajax_standing(model='team')
def test_ajax_standing_404_model(self):
self.test_ajax_standing(model='else', default_code=404)
def test_ajax_stats(self, model='driver', default_code=200):
URL = 'dr27-ajax:stats'
data = {'model': model}
self._GET_request(URL, data=data, code=default_code)
data['competition_slug'] = 'f1'
self._GET_request(URL, data=data, code=default_code)
data['year'] = 2016
self._GET_request(URL, data=data, code=default_code)
data['record'] = 'POLE'
self._GET_request(URL, data=data, code=default_code)
for opt in [None, 'streak', 'seasons', 'streak_top', 'streak_actives', 'streak_top_actives']:
data['rank_opt'] = opt
self._GET_request(URL, data=data, code=default_code)
data['record'] = 'FFF'
self._GET_request(URL, data=data, code=404)
def test_ajax_stats_team(self):
self.test_ajax_stats(model='team')
def test_ajax_stats_404_model(self):
self.test_ajax_stats(model='else', default_code=404)
def test_race_view(self):
kwargs = {'competition_slug': 'f1', 'year': 2016, 'race_id': 1}
self._GET_request('season:race-view', kwargs=kwargs)
kwargs['race_id'] = 200
self._GET_request('season:race-view', kwargs=kwargs, code=404)
def test_driver_records_global_view(self):
self._GET_request('global:index')
self._GET_request('global:driver')
self._GET_request('global:driver-olympic')
self._GET_request('global:driver-seasons-rank')
self._GET_request('global:driver-comeback')
self._GET_request('global:driver-record-index')
self._test_competition_view()
self._test_season_view()
def test_profiles_view(self):
self._GET_request('global:driver-profile', kwargs={'driver_id': 1})
self._GET_request('global:team-profile', kwargs={'team_id': 1})
def test_team_records_global_view(self):
self._GET_request('global:team')
self._GET_request('global:team-olympic')
self._GET_request('global:team-seasons-rank')
self._GET_request('global:team-record-index')
def test_contender_season_points(self):
driver = Driver.objects.get(pk=1)
season = Season.objects.get(pk=1)
contender_season = ContenderSeason(driver=driver, season=season)
punctuation_config_a = get_punctuation_config('F1-25')
contender_season_points_a = contender_season.get_points(punctuation_config=punctuation_config_a)
punctuation_config_b = get_punctuation_config('F1-10+6')
contender_season_points_b = contender_season.get_points(punctuation_config=punctuation_config_b)
self.assertGreater(contender_season_points_a, contender_season_points_b)
def test_result_tuple(self):
seat = Seat.objects.get(pk=1)
results = seat.results.all()
self.assertIsNotNone(get_tuples_from_results(results=results))
def _check_get_changelist(self, ma):
request = get_request()
self.assertTrue(ma.get_changelist(request=request))
def test_season_admin(self):
ma = SeasonAdmin(Season, self.site)
self._check_get_changelist(ma)
self.assertTrue(ma.get_form(request=None, obj=None))
self.assertIsInstance(ma.get_season_copy(copy_id=1), dict)
request = get_request()
season = Season.objects.get(pk=1)
season_form = ma.get_form(request=request, obj=season)
self.assertTrue(ma.print_copy_season(obj=season))
self.assertIsNotNone(SeasonAdminForm(season_form))
def _test_copy_url(self, COPY_URL, method_to_copy, data=None):
season = Season.objects.get(pk=1)
if data:
request = self.factory.post(COPY_URL, data)
else:
request = self.factory.get(COPY_URL)
ma = SeasonAdmin(Season, self.site)
getattr(ma, method_to_copy)(request, season.pk)
def _test_copy_items(self, COPY_URL, method_to_copy, new_season, items):
self._test_copy_url(COPY_URL, method_to_copy)
post_destiny = {
'season_destiny': new_season.pk,
'items': items,
'_selector': True
}
self._test_copy_url(COPY_URL, method_to_copy, post_destiny)
del post_destiny['_selector']
post_destiny['_confirm'] = True
self._test_copy_url(COPY_URL, method_to_copy, post_destiny)
def test_season_copy_items(self):
Season.objects.create(
competition_id=1,
year=2099,
punctuation='F1-25',
rounds=30
)
# todo check rounds
new_season = Season.objects.get(competition_id=1, year=2099)
season = Season.objects.get(pk=1)
# races
COPY_RACES_URL = reverse('admin:dr27-copy-races', kwargs={'pk': season.pk})
races = [race.pk for race in season.races.all()]
self._test_copy_items(COPY_RACES_URL, 'get_copy_races', new_season, races)
ma = SeasonAdmin(Season, self.site)
self.assertIn(COPY_RACES_URL, ma.print_copy_races(season))
def test_season_duplicate(self):
season = Season.objects.last()
COPY_SEASON_URL = reverse('admin:dr27-copy-season', kwargs={'pk': season.pk})
request = self.factory.get(COPY_SEASON_URL)
ma = SeasonAdmin(Season, self.site)
getattr(ma, 'get_duplicate_season')(request, season.pk)
request = self.factory.post(COPY_SEASON_URL, data={'year': season.year, '_selector': True})
ma = SeasonAdmin(Season, self.site)
getattr(ma, 'get_duplicate_season')(request, season.pk)
NEW_SEASON_YEAR = 9999
request = self.factory.post(COPY_SEASON_URL, data={'year': NEW_SEASON_YEAR, '_selector': True})
ma = SeasonAdmin(Season, self.site)
getattr(ma, 'get_duplicate_season')(request, season.pk)
self.assertTrue(Season.objects.get(competition=season.competition, year=NEW_SEASON_YEAR))
def test_driver_admin(self):
ma = DriverAdmin(Driver, self.site)
self._check_get_changelist(ma)
def test_team_admin(self):
ma = TeamAdmin(Team, self.site)
self._check_get_changelist(ma)
team = Team.objects.get(pk=1)
self.assertTrue(ma.print_competitions(team))
def test_competition_admin(self):
ma = CompetitionAdmin(Competition, self.site)
self._check_get_changelist(ma)
def test_circuit_admin(self):
ma = CircuitAdmin(Circuit, self.site)
self._check_get_changelist(ma)
def test_grandprix_admin(self):
ma = GrandPrixAdmin(GrandPrix, self.site)
self._check_get_changelist(ma)
def _test_season_formset(self, child_model, formset, fields):
inline_formset = inlineformset_factory(Season, child_model, formset=formset,
fields=fields,
form=forms.ModelForm, can_delete=True)
return inline_formset
def _test_season_formset_copy(self, child_model, formset, fields, data=None):
inline_formset = self._test_season_formset(child_model, formset, fields)
# inline_formset.request = self.factory.request(QUERY_STRING='copy=1')
related_formset = inline_formset(data)
# self.assertTrue(related_formset.get_copy(copy_id=1))
# self.assertFalse(related_formset.is_empty_form())
self.assertFalse(related_formset.has_changed())
return related_formset
def test_race_formset(self):
self._test_season_formset_copy(Race, RaceFormSet,
('round', 'grand_prix', 'circuit', 'date', 'alter_punctuation'))
def test_race_admin(self):
ma = RaceAdmin(Race, self.site)
self._check_get_changelist(ma)
race = Race.objects.get(pk=1)
self.assertEquals(ma.print_pole(race), str(race.pole.driver))
self.assertEquals(ma.print_winner(race), str(race.winner.driver))
self.assertEquals(ma.print_fastest(race), str(race.fastest.driver))
self.assertEquals(ma.clean_position('1'), 1)
def _check_formfield_for_foreignkey(self, ma, request_obj, dbfield):
request = get_request()
request._obj_ = request_obj
self.assertIsNotNone(ma.formfield_for_foreignkey(dbfield, request=request))
def test_race_inline(self):
race = Race.objects.get(pk=1)
race_ma = RaceInline(SeasonAdmin, self.site)
request = get_request()
# self.assertIsNotNone(race_ma.get_formset())
season = race.season
self._check_formfield_for_foreignkey(race_ma, request_obj=season, dbfield=Race.grand_prix.field)
def test_result_inline(self):
race = Race.objects.get(pk=1)
result = race.results.filter(points__gt=0).first()
self.assertEqual(ResultInline(RaceAdmin, self.site).points(result), result.points)
result = race.results.filter(points=0).first()
self.assertFalse(ResultInline(RaceAdmin, self.site).points(result))
def test_race_admin_fastest_car(self):
race = Race.objects.get(pk=1)
self.assertTrue(RaceAdminForm(instance=race))
def test_seat_inline(self):
driver = Driver.objects.get(pk=1)
seat = driver.seats.first()
link = str(reverse('admin:driver27_seat_change', args=[seat.pk]))
self.assertIn(link, SeatInline(DriverAdmin, self.site).edit(seat))
def _test_edit_positions(self, request, **kwargs):
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
self.assertTrue(RaceAdmin(Race, self.site).edit_positions(request=request, **kwargs))
def test_edit_positions(self):
race = Race.objects.filter(results__isnull=False).first()
kwargs = {'pk': race.pk}
URL = reverse('admin:driver27_race_results', kwargs=kwargs)
request = self.factory.get(URL, follow=True)
self._test_edit_positions(request, **kwargs)
request = self.factory.post(URL, follow=True)
self._test_edit_positions(request, **kwargs)
positions = to_delete = ''
data={'positions': positions, 'to_delete': to_delete}
request = self.factory.post(URL, data=data, follow=True)
self._test_edit_positions(request, **kwargs)
race_results = race.results.all()
race_results_count = race_results.count()
result = race_results.filter(finish__gte=1).first()
result.finish += 1
position = json.dumps([{
'seat_id': result.seat_id,
'qualifying': result.qualifying,
'finish': result.finish,
'retired': result.retired,
'wildcard': result.wildcard}])
result_to_delete = race.results.filter(finish__gte=1).exclude(pk=result.pk).last()
data={'positions': position, 'to_delete': json.dumps([result_to_delete.pk])}
request = self.factory.post(URL, data=data, follow=True)
self._test_edit_positions(request, **kwargs)
self.assertNotEqual(race.results.count(), race_results_count)
self.assertEqual(race.results.get(pk=result.pk).finish, result.finish)
self.assertFalse(race.results.filter(pk=result_to_delete.pk).exists())
data={'positions': position, 'to_delete': json.dumps([result.pk])}
request = self.factory.post(URL, data=data, follow=True)
self._test_edit_positions(request, **kwargs)
self.assertTrue(race.results.filter(seat_id=result.seat_id).exists())
# Currently no race without results exists in the fixtures
# def test_race_with_no_results(self):
# ma = RaceAdmin(Race, self.site)
# race = Race.objects.get(pk=20) # No results
# self.assertIsNone(ma.print_pole(race))
# self.assertIsNone(ma.print_winner(race))
# self.assertIsNone(ma.print_fastest(race))
def test_related_competition_admin(self):
race = Race.objects.get(pk=1)
related_competition = RelatedCompetitionAdmin()
# maybe exception
self.assertIsNone(related_competition.print_competitions(race))
def test_circuit_widget(self):
grand_prix = GrandPrix.objects.filter(default_circuit__isnull=False).first()
self.assertEqual(get_circuit_id_from_gp(grand_prix.pk), grand_prix.default_circuit.pk)
def test_grand_prix_widget_create_option(self):
attrs = {'id': 'id_grand_prix'}
grand_prix = GrandPrix.objects.filter(default_circuit__isnull=False).first()
index = grand_prix.pk
label = '{grand_prix}'.format(grand_prix=grand_prix)
name = 'grand_prix'
selected = True
subindex = None
value = grand_prix.pk
widget = GrandPrixWidget()
option = widget.create_option(name, value, label, selected, index, subindex, attrs)
if option: # >3.7
self.assertEqual(option['attrs']['data-circuit'], get_circuit_id_from_gp(value))
def test_grand_prix_widget_render_option(self):
widget = GrandPrixWidget()
option = widget.render_option([], None, '')
self.assertIn('value=""', option)
grand_prix = GrandPrix.objects.filter(default_circuit__isnull=False).first()
pk_circuit = grand_prix.default_circuit.pk
widget = GrandPrixWidget()
option = widget.render_option([], grand_prix.pk, str(grand_prix))
self.assertIn('data-circuit="{pk_circuit}"'.format(pk_circuit=pk_circuit), option)
self.assertNotIn(' selected="selected"', option)
widget = GrandPrixWidget()
option = widget.render_option([u'{gp}'.format(gp=grand_prix.pk)], grand_prix.pk, str(grand_prix))
self.assertIn(' selected="selected"', option)
class DR27Api(APITestCase):
fixtures = get_fixtures_test()
def _GET_request(self, reverse_url, kwargs=None, code=200):
reverse_url = ':'.join([DRIVER27_NAMESPACE, DRIVER27_API_NAMESPACE, reverse_url])
request_url = reverse(reverse_url, kwargs=kwargs)
response = self.client.get(request_url, format='json')
self.assertEqual(response.status_code, code)
def test_api_circuit(self):
self._GET_request('circuit-list')
def test_api_competition(self):
self._GET_request('competition-list')
self._GET_request('competition-detail', kwargs={'pk': 1})
self._GET_request('competition-next-race', kwargs={'pk': 1})
self._GET_request('competition-teams', kwargs={'pk': 1})
def test_api_driver(self):
self._GET_request('driver-list')
self._GET_request('driver-detail', kwargs={'pk': 1})
self._GET_request('driver-seats', kwargs={'pk': 1})
def test_api_grand_prix(self):
self._GET_request('grandprix-list')
self._GET_request('grandprix-detail', kwargs={'pk': 1})
def test_api_race(self):
self._GET_request('race-list')
self._GET_request('race-detail', kwargs={'pk': 1})
self._GET_request('race-no-seats', kwargs={'pk': 1})
self._GET_request('race-results', kwargs={'pk': 1})
self._GET_request('race-seats', kwargs={'pk': 1})
self._GET_request('race-drivers', kwargs={'pk': 1})
def test_api_result(self):
self._GET_request('result-list')
self._GET_request('result-detail', kwargs={'pk': 1})
def test_api_season(self):
self._GET_request('season-list')
self._GET_request('season-detail', kwargs={'pk': 1})
self._GET_request('season-next-race', kwargs={'pk': 1})
self._GET_request('season-no-seats', kwargs={'pk': 1})
self._GET_request('season-races', kwargs={'pk': 1})
self._GET_request('season-seats', kwargs={'pk': 1})
self._GET_request('season-standings', kwargs={'pk': 1})
self._GET_request('season-standings-team', kwargs={'pk': 1})
self._GET_request('season-teams', kwargs={'pk': 1})
self._GET_request('season-drivers', kwargs={'pk': 1})
self._GET_request('season-title', kwargs={'pk': 1})
def test_api_seat(self):
self._GET_request('seat-list')
self._GET_request('seat-detail', kwargs={'pk': 1})
self._GET_request('seat-periods', kwargs={'pk': 1})
self._GET_request('seatperiod-list')
self._GET_request('seatperiod-detail', kwargs={'pk': 1})
def test_api_team(self):
self._GET_request('team-list')
self._GET_request('team-detail', kwargs={'pk': 1})
| 41.332681
| 105
| 0.674731
|