hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace981af70aaaab22521a8d5075bd6db7436791d | 327 | py | Python | code/Algorithms/Searching/binary.py | souvikxcoder/Python | 1d24f642c68fc7f75b2971f85065867ff1c51cc1 | [
"MIT"
] | null | null | null | code/Algorithms/Searching/binary.py | souvikxcoder/Python | 1d24f642c68fc7f75b2971f85065867ff1c51cc1 | [
"MIT"
] | null | null | null | code/Algorithms/Searching/binary.py | souvikxcoder/Python | 1d24f642c68fc7f75b2971f85065867ff1c51cc1 | [
"MIT"
] | null | null | null | # binary search
def binary(srchlist, srch):
    """Binary-search `srchlist` (must be sorted ascending) for `srch`.

    Returns the index of `srch` if present, otherwise -1.

    Fixes: the original used `/` (true division), which produces a float
    under Python 3 and crashes on list indexing; it also had an
    unreachable `break` after `return`.
    """
    first = 0
    last = len(srchlist) - 1
    while first <= last:
        mid = (first + last) // 2  # integer midpoint
        if srch > srchlist[mid]:
            first = mid + 1
        elif srch < srchlist[mid]:
            last = mid - 1
        else:
            return mid
    return -1
| 16.35 | 64 | 0.633028 |
ace981feda47bff4a08907ccec700b765e1948a6 | 52 | py | Python | ygq/wsgi.py | chichengfengxue/flask-youguoqi | e1423602eaea938cb69b7ff0d60c6e61becceca5 | [
"MIT"
] | null | null | null | ygq/wsgi.py | chichengfengxue/flask-youguoqi | e1423602eaea938cb69b7ff0d60c6e61becceca5 | [
"MIT"
] | null | null | null | ygq/wsgi.py | chichengfengxue/flask-youguoqi | e1423602eaea938cb69b7ff0d60c6e61becceca5 | [
"MIT"
] | null | null | null | from .__init__ import create_app
# Module-level application object built via the app factory; presumably the
# WSGI entry point used by the server (file is named wsgi.py) -- confirm
# against the deployment configuration.
app = create_app()
| 17.333333 | 32 | 0.788462 |
ace984028b7d55c27fd29802f4c4c858dea8367c | 155 | py | Python | bin/ominoes/pentominoes-cross-3.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | bin/ominoes/pentominoes-cross-3.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | bin/ominoes/pentominoes-cross-3.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | 1 | 2022-01-02T16:54:14.000Z | 2022-01-02T16:54:14.000Z | #!/usr/bin/env python
# $Id$
"""Pentominoes "Cross 3" puzzle runner; the puzzle has 28 solutions."""
import puzzler
from puzzler.puzzles.pentominoes import PentominoesCross3
# Hand the puzzle class to the puzzler driver, which solves/reports it.
puzzler.run(PentominoesCross3)
| 15.5 | 57 | 0.774194 |
ace98409b2ce438fe30926da6e25e997fd100183 | 309 | py | Python | Introducao_a_programacao/Aula_03/Aprendendo_divisao.py | felipefreitassilva/PUCRS-Eng_Software | 8ca6d4ed6f8ab3192b2af77bdef5e8d4d673cd9e | [
"MIT"
] | null | null | null | Introducao_a_programacao/Aula_03/Aprendendo_divisao.py | felipefreitassilva/PUCRS-Eng_Software | 8ca6d4ed6f8ab3192b2af77bdef5e8d4d673cd9e | [
"MIT"
] | null | null | null | Introducao_a_programacao/Aula_03/Aprendendo_divisao.py | felipefreitassilva/PUCRS-Eng_Software | 8ca6d4ed6f8ab3192b2af77bdef5e8d4d673cd9e | [
"MIT"
] | null | null | null | #Variáveis
# Variables: n1, n2, divisao_inteira (integer quotient), resto (remainder)
# Start
def main():
    """Read two integers and print their integer quotient and remainder.

    Prompts/output are in Portuguese (kept as-is; they are runtime strings).
    """
    n1 = int(input("Digite aqui um número: "))
    n2 = int(input("Digite aqui um número: "))
    divisao_inteira = n1//n2
    resto = n1%n2
    print(n1,"dividido por",n2,"é",divisao_inteira,", e possui resto:",resto)
main()
# End
ace984d4b3cac4c3ef93e5dc817582e1a1129de0 | 728 | bzl | Python | dart/dart_grpc_library.bzl | mirandacong/rules_proto | b74e93b3a197401da858423d2758aaf4f38be4f9 | [
"Apache-2.0"
] | null | null | null | dart/dart_grpc_library.bzl | mirandacong/rules_proto | b74e93b3a197401da858423d2758aaf4f38be4f9 | [
"Apache-2.0"
] | null | null | null | dart/dart_grpc_library.bzl | mirandacong/rules_proto | b74e93b3a197401da858423d2758aaf4f38be4f9 | [
"Apache-2.0"
] | null | null | null | load("//dart:dart_grpc_compile.bzl", "dart_grpc_compile")
load("@io_bazel_rules_dart//dart/build_rules:core.bzl", "dart_library")
def dart_grpc_library(**kwargs):
    """Macro: compile gRPC/protobuf sources from deps, then wrap them in a dart_library."""
    lib_name = kwargs.get("name")
    visibility = kwargs.get("visibility")

    # Intermediate target that holds the generated Dart protobuf/gRPC sources.
    compile_name = lib_name + "_pb"
    dart_grpc_compile(
        name = compile_name,
        deps = kwargs.get("deps"),
        visibility = visibility,
        verbose = kwargs.get("verbose"),
    )

    # Final library: generated sources plus the vendored protobuf/grpc runtimes.
    dart_library(
        name = lib_name,
        srcs = [compile_name],
        deps = [
            str(Label("@vendor_protobuf//:protobuf")),
            str(Label("@vendor_grpc//:grpc")),
        ],
        pub_pkg_name = lib_name,
        visibility = visibility,
    )
| 26 | 71 | 0.581044 |
ace987f77cdfd911d59105351a05172a674897ce | 839 | py | Python | commands/login.py | ToxicFrog/doomrl-server | 7854f0748a4f47ff21b8683bb23bebbfe837a46d | [
"MIT"
] | 4 | 2015-06-01T02:52:58.000Z | 2020-09-06T23:47:36.000Z | commands/login.py | ToxicFrog/doomrl-server | 7854f0748a4f47ff21b8683bb23bebbfe837a46d | [
"MIT"
] | 7 | 2015-02-16T13:28:31.000Z | 2016-10-18T01:40:24.000Z | commands/login.py | ToxicFrog/doomrl-server | 7854f0748a4f47ff21b8683bb23bebbfe837a46d | [
"MIT"
] | 1 | 2019-05-13T18:49:32.000Z | 2019-05-13T18:49:32.000Z | from commands import Command
import doomrl
from syslog import syslog as log
class LoginCommand(Command):
  """login <name> <pass> -- log in to an existing account.
  This is necessary in order to actually play games. To create a new account,
  use "register"."""

  # Number of arguments the command dispatcher passes to run().
  nargs = 2

  def run(self, name, password):
    """Attempt login; return an error string on failure, None on success."""
    import hmac  # stdlib; local import keeps the change self-contained
    if doomrl.user():
      return 'You are already logged in!'
    if not name or not password:
      return 'You must specify both a username and a password.'

    # Check password against the per-user passwd file.
    try:
      with open(doomrl.homepath('passwd', user=name)) as f:
        passwd = f.read()
      # Constant-time comparison avoids leaking match length/prefix via timing.
      # NOTE(review): passwords are stored in plaintext on disk -- consider
      # hashing them in a follow-up change.
      if hmac.compare_digest(passwd.encode('utf-8'), password.encode('utf-8')):
        log('%s successfully logged in.' % name)
        doomrl.login(name)
        return
    except IOError:
      # Missing passwd file => unknown user; fall through to the generic
      # failure path so we don't reveal which usernames exist.
      pass
    log('Failed login attempt as %s' % name)
    doomrl.login()
    return 'Login failed.'
| 24.676471 | 77 | 0.640048 |
ace98870326b4188f895aaec8088afe111de4dfc | 4,725 | py | Python | django-project/lead/settings.py | KBIAnews/kbia-lead | 094da88959ac91c774b2551c3d74f9614b146156 | [
"MIT"
] | null | null | null | django-project/lead/settings.py | KBIAnews/kbia-lead | 094da88959ac91c774b2551c3d74f9614b146156 | [
"MIT"
] | null | null | null | django-project/lead/settings.py | KBIAnews/kbia-lead | 094da88959ac91c774b2551c3d74f9614b146156 | [
"MIT"
] | null | null | null | """
Django settings for lead project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Pulled from the environment so the key never lands in version control;
# raises KeyError at import time if the variable is unset.
SECRET_KEY = os.environ['KBIA_BAKERIES_SECRET_KEY']

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['localhost','nathanlawrence.fwd.wf']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'storages',
    'bakery',
    'markdownify',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

# Fixed: this assignment was accidentally duplicated on two consecutive lines.
ROOT_URLCONF = 'lead.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.static',
            ],
        },
    },
]

WSGI_APPLICATION = 'lead.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Credentials come from the environment (os.getenv returns None if unset).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'lead',
        'USER': os.getenv('KBIA_BAKERIES_DB_USER'),
        'PASSWORD': os.getenv('KBIA_BAKERIES_DB_PASS'),
        'HOST': os.getenv('KBIA_BAKERIES_DB_URL'),
        'PORT': '5432',
    },
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'America/Chicago'

USE_I18N = True

USE_L10N = True

USE_TZ = True

PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/lead/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# NOTE(review): these directories live *inside* STATIC_ROOT, which Django
# normally warns about (collectstatic copies into STATIC_ROOT) -- confirm this
# layout is intentional before changing it.
STATICFILES_DIRS = (
    os.path.join(STATIC_ROOT, 'css/'),
    os.path.join(STATIC_ROOT, 'js/'),
    os.path.join(STATIC_ROOT, 'img/'),
)

# File Storage.
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_STORAGE_BUCKET_NAME = 'media.kbia.org'
AWS_LOCATION = 'lead'
AWS_ACCESS_KEY_ID = os.getenv('KBIA_BAKERIES_AWS_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('KBIA_BAKERIES_AWS_KEY')
AWS_QUERYSTRING_AUTH = False

# Django Bakery Settings
BUILD_DIR = os.path.join(PROJECT_ROOT, 'baked')
AWS_BUCKET_NAME = 'kbia-lead'
AWS_S3_ENDPOINT = 'https://s3-accelerate.amazonaws.com'
BAKERY_VIEWS = ()

# Django Markdownify Settings
MARKDOWNIFY_WHITELIST_TAGS = [
    'a',
    'abbr',
    'acronym',
    'b',
    'blockquote',
    'em',
    'i',
    'li',
    'ol',
    'p',
    'strong',
    'ul',
]
MARKDOWNIFY_BLEACH = False
ace9888c45e5c4821edb10217de496d943d30dd0 | 6,406 | py | Python | Web/Python/server.py | biddisco/VTK | 80fa7c3a767ce306586a596a6c6f3518a34e2f11 | [
"BSD-3-Clause"
] | 1 | 2020-05-20T08:20:44.000Z | 2020-05-20T08:20:44.000Z | Web/Python/server.py | biddisco/VTK | 80fa7c3a767ce306586a596a6c6f3518a34e2f11 | [
"BSD-3-Clause"
] | null | null | null | Web/Python/server.py | biddisco/VTK | 80fa7c3a767ce306586a596a6c6f3518a34e2f11 | [
"BSD-3-Clause"
] | null | null | null | r"""server is a module that enables using VTK through a web-server. This
module implments a WampServerProtocol that provides the core RPC-API needed to
place interactive visualization in web-pages. Developers can extent
ServerProtocol to provide additional RPC callbacks for their web-applications.
This module can be used as the entry point to the application. In that case, it
sets up a Twisted web-server that can generate visualizations as well as serve
web-pages are determines by the command line arguments passed in.
Use "--help" to list the supported arguments.
"""
import types
import logging
from threading import Timer
from twisted.python import log
from twisted.internet import reactor
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, \
WampServerProtocol
from autobahn.resource import WebSocketResource
from autobahn.wamp import WampServerFactory
from . import wamp
from . import testing
# =============================================================================
# Setup default arguments to be parsed
# -s, --nosignalhandlers
# -d, --debug
# -p, --port 8080
# -t, --timeout 300 (seconds)
# -c, --content '/www' (No content means WebSocket only)
# -a, --authKey vtkweb-secret
# =============================================================================
def add_arguments(parser):
    """
    Add the arguments known to this module. parser must be an
    argparse.ArgumentParser instance.

    Returns the same parser so calls can be chained.
    """
    # (removed an unused function-local `import os` that served no purpose)
    parser.add_argument("-d", "--debug",
        help="log debugging messages to stdout",
        action="store_true")
    parser.add_argument("-s", "--nosignalhandlers",
        help="Prevent Twisted to install the signal handlers so it can be started inside a thread.",
        action="store_true")
    parser.add_argument("-i", "--host", type=str, default='localhost',
        help="the interface for the web-server to listen on (default: localhost)")
    parser.add_argument("-p", "--port", type=int, default=8080,
        help="port number for the web-server to listen on (default: 8080)")
    parser.add_argument("-t", "--timeout", type=int, default=300,
        help="timeout for reaping process on idle in seconds (default: 300s)")
    parser.add_argument("-c", "--content", default='',
        help="root for web-pages to serve (default: none)")
    parser.add_argument("-a", "--authKey", default='vtkweb-secret',
        help="Authentication key for clients to connect to the WebSocket.")
    # Padding output lets a session manager detect startup; see start_webserver.
    parser.add_argument("-f", "--force-flush", default=False, help="If provided, this option will force additional padding content to the output. Useful when application is triggered by a session manager.", dest="forceFlush", action='store_true')

    # Hook to extract any testing arguments we need
    testing.add_arguments(parser)

    return parser
# =============================================================================
# Parse arguments and start webserver
# =============================================================================
def start(argv=None,
          protocol=wamp.ServerProtocol,
          description="VTK/Web web-server based on Twisted."):
    """
    Sets up the web-server when used with __name__ == '__main__'. This can
    also be called directly. Pass the optional protocol to override the
    protocol used. Default is ServerProtocol.
    """
    try:
        import argparse
    except ImportError:
        # since Python 2.6 and earlier don't have argparse, we simply provide
        # the source for the same as _argparse and we use it instead.
        import _argparse as argparse
    # Parse the command line (argv=None means sys.argv) and launch the server.
    parser = argparse.ArgumentParser(description=description)
    add_arguments(parser)
    args = parser.parse_args(argv)
    start_webserver(options=args, protocol=protocol)
# =============================================================================
# Stop webserver
# =============================================================================
def stop_webserver() :
    """Stop the Twisted reactor; callFromThread makes this safe to call from any thread."""
    reactor.callFromThread(reactor.stop)
# =============================================================================
# Start webserver
# =============================================================================
def start_webserver(options, protocol=wamp.ServerProtocol, disableLogging=False):
    """
    Starts the web-server with the given protocol. Options must be an object
    with the following members:
    options.host : the interface for the web-server to listen on
    options.port : port number for the web-server to listen on
    options.timeout : timeout for reaping process on idle in seconds
    options.content : root for web-pages to serve.

    Blocks inside reactor.run() until the reactor is stopped.
    """
    from twisted.internet import reactor
    from twisted.web.server import Site
    from twisted.web.static import File
    import sys
    if not disableLogging:
        log.startLogging(sys.stdout)
    # setup the server-factory (WAMP over WebSocket, reaped after idle timeout)
    wampFactory = wamp.ReapingWampServerFactory(
        "ws://%s:%d" % (options.host, options.port), options.debug, options.timeout)
    wampFactory.protocol = protocol
    # Do we serve static content or just websocket ?
    if len(options.content) == 0:
        # Only WebSocket
        listenWS(wampFactory)
    else:
        # Static HTTP + WebSocket: websocket is exposed under the /ws path
        wsResource = WebSocketResource(wampFactory)
        root = File(options.content)
        root.putChild("ws", wsResource)
        site = Site(root)
        reactor.listenTCP(options.port, site)
    # Work around to force the output buffer to be flushed.
    # This allows the process launcher to parse the output and
    # wait for "Start factory" to know that the WebServer
    # is running.
    if options.forceFlush :
        for i in range(200):
            log.msg("+"*80, logLevel=logging.CRITICAL)
    # Give test client a chance to initialize a thread for itself
    # testing.initialize(opts=options)
    # Start the factory
    wampFactory.startFactory()
    # Initialize testing: checks if we're doing a test and sets it up
    testing.initialize(options, reactor)
    # Start the reactor (blocks until the reactor stops)
    if options.nosignalhandlers:
        reactor.run(installSignalHandlers=0)
    else:
        reactor.run()
    # Stop the factory
    wampFactory.stopFactory()
    # Give the testing module a chance to finalize, if necessary
    testing.finalize()
# Script entry point: parse command-line options and run the server.
if __name__ == "__main__":
    start()
| 37.244186 | 247 | 0.628473 |
ace988c705dead0e4918d2732d10b3f9176da38a | 1,841 | py | Python | models.py | GalacticFog/coinkite-real-time-invoice | fa95e757e7a051f91671d00e9d606cc163ba83be | [
"MIT"
] | 1 | 2021-10-31T19:06:16.000Z | 2021-10-31T19:06:16.000Z | models.py | petertodd/coinkite-real-time-invoice | 04b2df646fa0755984c3e255ddd2717527666695 | [
"MIT"
] | null | null | null | models.py | petertodd/coinkite-real-time-invoice | 04b2df646fa0755984c3e255ddd2717527666695 | [
"MIT"
] | 3 | 2015-12-02T11:43:19.000Z | 2021-10-31T19:06:00.000Z | #
# Database Models
#
import datetime
from google.appengine.ext import ndb
# Max time, in seconds, for customer to pay the invoice.
MAX_PAY_TIME = 16*60
def NOW():
    """Return the current time as a naive UTC datetime (compared against created_at below)."""
    return datetime.datetime.utcnow()
class DecimalProperty(ndb.PickleProperty):
    # Values are stored by pickling; presumably used for Decimal amounts
    # (see MyInvoice.amount/payable) since ndb has no native decimal
    # property. TODO confirm the intended value type.
    pass
class MyInvoice(ndb.Model):
    """Model for one invoice.

    Stores the requested ("ask") amount plus the amount actually payable
    after currency conversion, and exposes expiry helpers based on
    MAX_PAY_TIME.
    """
    created_at = ndb.DateTimeProperty(auto_now_add=True)
    # opaque key embedded in the invoice URL (see get_url / get_by_token)
    token = ndb.StringProperty(indexed = True)
    # the "ask" amount
    amount = DecimalProperty(required = True)
    amount_cct = ndb.StringProperty(required = True)
    # actual amount expected after conversion
    payable = DecimalProperty(required = True)
    payable_cct = ndb.StringProperty(choices = ['BTC', 'LTC', 'XTN', 'BLK'])
    label = ndb.StringProperty()
    show_fiat = ndb.BooleanProperty()
    # After we talk to CK:
    pubkey = ndb.StringProperty(required=True)
    ck_refnum = ndb.StringProperty(required=True)
    # None until the invoice has been paid
    paid_at = ndb.DateTimeProperty(default=None)
    @classmethod
    def recent_invoices(cls):
        # All invoices, newest first (unbounded query).
        return cls.query().order(-cls.created_at)
    @classmethod
    def get_by_token(cls, token_key):
        # Look up a single invoice by its URL token; None if not found.
        return cls.query(cls.token == token_key).get()
    def get_url(self):
        # Site-relative URL for this invoice's page.
        return '/invoice/%s' % self.token
    @property
    def has_no_conversion(self):
        # True when ask currency == payable currency, i.e. no exchange-rate
        # quote is involved.
        return self.payable_cct == self.amount_cct
    @property
    def is_expired(self):
        # Invoices involving a conversion expire MAX_PAY_TIME seconds after
        # creation; same-currency invoices never expire.
        if self.has_no_conversion: return False
        return (NOW() - self.created_at).total_seconds() > MAX_PAY_TIME
    @property
    def is_recent(self):
        # created in last 30 seconds; for highlight in table
        return (NOW() - self.created_at).total_seconds() < 30
    def get_time_left(self):
        # Seconds remaining before expiry, clamped at 0; conversion-free
        # invoices always report the full window.
        if self.has_no_conversion: return MAX_PAY_TIME
        return max(0, MAX_PAY_TIME - (NOW() - self.created_at).total_seconds())
# EOF
| 27.073529 | 79 | 0.678979 |
ace989352cd737d329c890d0bf5995ee99c77382 | 11,422 | py | Python | ext/OnAVOS/datasets/CUHK03/CUHK03.py | dbadrian/gdk_dlrc17 | 7aebed740dc4a09f3549674b0cfeb22bdb392ac6 | [
"MIT"
] | 1 | 2019-03-29T12:36:55.000Z | 2019-03-29T12:36:55.000Z | ext/OnAVOS/datasets/CUHK03/CUHK03.py | dbadrian/gdk_dlrc17 | 7aebed740dc4a09f3549674b0cfeb22bdb392ac6 | [
"MIT"
] | null | null | null | ext/OnAVOS/datasets/CUHK03/CUHK03.py | dbadrian/gdk_dlrc17 | 7aebed740dc4a09f3549674b0cfeb22bdb392ac6 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy
import os
from datasets.Dataset import Dataset
from datasets.Util.Reader import load_image_tensorflow, load_normalized_image_tensorflow
from datasets.Util.Util import smart_shape, username
from datasets.Augmentors import parse_augmentors, apply_augmentors
from datasets.Util.Normalization import normalize
# CUHK03_DEFAULT_PATH = "/fastwork/" + username() + "/mywork/data/CUHK03/"
CUHK03_DEFAULT_PATH = "/home/" + username() + "/dlrc17-gdk/ext/OnAVOS/custom_dataset/JPEGImages/480p/"
DEFAULT_INPUT_SIZE = [270, 90]
CUHK03_VOID_LABEL = 255
class CUHK03Dataset(Dataset):
  """Person re-identification dataset wrapper (CUHK03-style layout).

  Training images are expected to be named "<person_id>_<image_id>.png" with
  the ids zero-padded to 5 and 4 digits, under data_dir/train_folder. Builds
  TensorFlow input pipelines for three training batching modes ("single",
  "pair", "group") and two validation modes ("embedding", "similarity").
  """
  def __init__(self, config, subset, coord):
    # subset selects which pipeline create_input_tensors_dict builds.
    super(CUHK03Dataset, self).__init__(subset)
    assert subset in ("train", "valid"), subset
    self.ignore_classes = None
    self.config = config
    self.subset = subset
    self.coord = coord
    self.data_dir = config.unicode("data_dir", CUHK03_DEFAULT_PATH)
    self.model = config.unicode("model","")
    self.train_folder = config.unicode("train_folder","train/")
    # Nominal epoch length; not derived from the actual number of files.
    self.epoch_length = config.int("epoch_length", 1000)
    self.n_classes = config.int("num_classes",None)
    self.input_size = config.int_list("input_size", DEFAULT_INPUT_SIZE)
    self.input_size = tuple(self.input_size)
    self.batching_mode = config.unicode("batching_mode", "single")
    assert self.batching_mode in ("single","pair","group"), self.batching_mode
    self.validation_mode = config.unicode("validation_mode","embedding")
    assert self.validation_mode in ("embedding", "similarity"), self.validation_mode
    self.group_size = config.int("group_size",4)
    self.pair_ratio = config.float("pair_ratio", 1.0)
    augmentor_strings = self.config.unicode_list("augmentors_train", [])
    self.augmentors = parse_augmentors(augmentor_strings, self.void_label())
    # Derive per-person image counts from the training file names
    # ("<person>_<img>.<ext>"); train_counts[i] = #images of person id i+1.
    self.train_names = sorted(os.listdir(self.data_dir + self.train_folder))
    train_val = numpy.array([(int(r.split('_')[0]), int(r.split('_')[1].split('.')[0])) for r in self.train_names])
    train_id_list,train_counts = numpy.unique(train_val[:,0],return_counts=True)
    self.train_counts = tf.constant(train_counts.astype(numpy.int32))
    self.num_train_id = train_id_list.shape[0]
    self.num_test_id = 6
    # Placeholders driving the validation pipelines:
    # idx_placeholder = [start_idx, end_idx, extra1, extra2]; the meaning of
    # the extras depends on the validation mode (see create_input_tensors_dict).
    self.idx_placeholder = tf.placeholder(tf.int32, (4,), "idx")
    self.test_case = tf.placeholder(tf.string)
    self.use_end_network = tf.placeholder(tf.bool)
  def num_examples_per_epoch(self):
    """Configured (nominal) number of examples per epoch."""
    return self.epoch_length
  def num_classes(self):
    """Number of classes from config ("num_classes"); may be None."""
    return self.n_classes
  def create_input_tensors_dict(self, batch_size):
    """Build the input pipeline; returns {"inputs", "labels", "tags"}.

    NOTE: the dispatch below uses substring tests such as
    `self.subset in "train"` -- this only works because __init__ asserts the
    exact allowed values, so the tests behave like equality checks.
    """
    ########################
    ####### TRAINING #######
    ########################
    if self.subset in "train":
      ####### Paired Batch-Mode #######
      # Each example is a 2-image pair; 50% chance of same person
      # (rand[0] % 2). Label = 1 for same person, 0 otherwise.
      if self.batching_mode in "pair":
        assert batch_size % 2 == 0
        # NOTE(review): `/=` yields a float under Python 3 (the "group"
        # branch casts to int, this one does not) -- confirm Python-2-only use.
        batch_size /= 2
        rand = tf.random_uniform([5], maxval=tf.int32.max, dtype=tf.int32)
        sample_same_person = rand[0] % 2
        pers_id_1 = ((rand[1] - 1) % self.num_train_id) + 1
        pers_1_n_imgs = self.train_counts[pers_id_1-1]
        img_id_1 = ((rand[2] - 1) % pers_1_n_imgs) + 1
        def if_same_person():
          # Same person; pick a second, different image index by sampling
          # from n-1 slots and skipping past img_id_1.
          pers_id_2 = pers_id_1
          img_id_2 = ((rand[4] - 1) % (pers_1_n_imgs-1)) + 1
          img_id_2 = tf.cond(img_id_2 >= img_id_1, lambda: img_id_2 + 1, lambda: img_id_2)
          return pers_id_2, img_id_2
        def if_not_same_person():
          # Different person; same skip trick to avoid pers_id_1.
          pers_id_2 = ((rand[3] - 1) % (self.num_train_id - 1)) + 1
          pers_id_2 = tf.cond(pers_id_2 >= pers_id_1, lambda: pers_id_2 + 1, lambda: pers_id_2)
          pers_2_n_imgs = self.train_counts[pers_id_2-1]
          img_id_2 = ((rand[4] - 1) % pers_2_n_imgs) + 1
          return pers_id_2, img_id_2
        pers_id_2, img_id_2 = tf.cond(tf.cast(sample_same_person, tf.bool), if_same_person, if_not_same_person)
        # Build "<pppp p>_<iiii>.png" names; tag records both names + label.
        img1 = tf.as_string(pers_id_1, width=5, fill="0") + "_" + tf.as_string(img_id_1, width=4, fill="0") + ".png"
        img2 = tf.as_string(pers_id_2, width=5, fill="0") + "_" + tf.as_string(img_id_2, width=4, fill="0") + ".png"
        tag = img1 + " " + img2 + " " + tf.as_string(sample_same_person)
        img1 = self.data_dir + self.train_folder +'/'+ img1
        img2 = self.data_dir + self.train_folder +'/'+ img2
        # Load, augment and normalize both images of the pair.
        img_val1 = load_image_tensorflow(img1, jpg=False)
        img_val1.set_shape(self.input_size + (3,))
        tensors = {"unnormalized_img": img_val1}
        tensors = apply_augmentors(tensors, self.augmentors)
        img_val1 = tensors["unnormalized_img"]
        img_val1 = normalize(img_val1)
        img_val2 = load_image_tensorflow(img2, jpg=False)
        img_val2.set_shape(self.input_size + (3,))
        tensors = {"unnormalized_img": img_val2}
        tensors = apply_augmentors(tensors, self.augmentors)
        img_val2 = tensors["unnormalized_img"]
        img_val2 = normalize(img_val2)
        pair = tf.stack([img_val1, img_val2])
        label = sample_same_person
        imgs, labels, tags = tf.train.batch([pair, label, tag], batch_size=batch_size)
        # Flatten (batch, 2, ...) to (2*batch, ...) so pairs are interleaved.
        shape = smart_shape(imgs)
        shape2 = shape[1:]
        shape2[0] *= batch_size
        imgs = tf.reshape(imgs, shape2)
      ####### Group Batch-Mode #######
      # Sample batch_size/group_size identities and group_size images each;
      # the label is the identity's index within the batch.
      elif self.batching_mode in "group":
        assert batch_size % self.group_size == 0
        batch_size /= self.group_size
        batch_size = int(batch_size)
        pers_ids = tf.random_shuffle(tf.range(1, self.num_train_id))[0:batch_size]
        def for_each_identity(p_idx):
          pers_id = pers_ids[p_idx]
          # Tiling lets us take group_size indices even when a person has
          # fewer images (indices repeat).
          img_ids = tf.tile(tf.random_shuffle(tf.range(1, self.train_counts[pers_id - 1])), [4])[0:self.group_size]
          def for_each_img(i_idx):
            img_id = img_ids[i_idx]
            tag = tf.as_string(pers_id, width=5, fill="0") + "_" + tf.as_string(img_id, width=4, fill="0") + ".png"
            img = load_image_tensorflow(self.data_dir + self.train_folder +'/'+ tag, jpg=False)
            img.set_shape(self.input_size + (3,))
            tensors = {"unnormalized_img": img}
            tensors = apply_augmentors(tensors, self.augmentors)
            img = tensors["unnormalized_img"]
            img = normalize(img)
            label = p_idx
            img.set_shape(self.input_size + (3,))
            return img, label, tag
          imgs, labels, tags = tf.map_fn(for_each_img, tf.range(0, self.group_size),
                                         dtype=(tf.float32, tf.int32, tf.string))
          return imgs, labels, tags
        imgs, labels, tags = tf.map_fn(for_each_identity, tf.range(0, batch_size),
                                       dtype=(tf.float32, tf.int32, tf.string))
        # Flatten (identities, group_size, ...) into a single batch axis.
        def reshape(x):
          shape = smart_shape(x)
          shape2 = shape[1:]
          shape2[0] = self.group_size * batch_size
          x = tf.reshape(x, shape2)
          return x
        imgs = reshape(imgs)
        labels = reshape(labels)
        tags = reshape(tags)
      ####### Single Batch-Mode #######
      # One random image of one random person; the label is the person id.
      else: # self.batching_mode in "single":
        rand = tf.random_uniform([2], maxval=tf.int32.max, dtype=tf.int32)
        pers_id_1 = ((rand[0] - 1) % self.num_train_id) + 1
        pers_1_n_imgs = self.train_counts[pers_id_1 - 1]
        img_id_1 = ((rand[1] - 1) % pers_1_n_imgs) + 1
        img1 = tf.as_string(pers_id_1, width=5, fill="0") + "_" + tf.as_string(img_id_1, width=4, fill="0") + ".png"
        tag = img1
        img1 = self.data_dir + self.train_folder + '/' + img1
        img_val1 = load_image_tensorflow(img1, jpg=False)
        img_val1.set_shape(self.input_size + (3,))
        tensors = {"unnormalized_img": img_val1}
        tensors = apply_augmentors(tensors, self.augmentors)
        img_val1 = tensors["unnormalized_img"]
        img_val1 = normalize(img_val1)
        label = pers_id_1
        imgs, labels, tags = tf.train.batch([img_val1, label, tag], batch_size=batch_size)
    ##########################
    ####### Validation #######
    ##########################
    else: # self.subset in "valid":
      ####### Similarity Validation-Mode #######
      if self.validation_mode in "similarity":
        path = self.test_case + '/'
        start_idx = self.idx_placeholder[0]
        end_idx = self.idx_placeholder[1]
        end_net = self.use_end_network
        def if_end_net():
          # idx_placeholder[2] = index of the probe image being compared.
          pdx = self.idx_placeholder[2]
          def _load_imgs(idx):
            img1_idx = pdx + 1
            img2_idx = idx + 1
            # label 1 iff the two indices match (abs(diff) <= 0 is ==).
            label = tf.cond(abs(img1_idx - img2_idx) <= 0, lambda: img1_idx * 0 + 1, lambda: img1_idx * 0)
            img1 = path + tf.as_string(img1_idx, width=4, fill="0") + "_1.png"
            img2 = path + tf.as_string(img2_idx, width=4, fill="0") + "_2.png"
            tag = img1 + " " + img2 + " " + tf.as_string(label)
            # NOTE(review): images are all-zero here; presumably only the
            # tags/labels are consumed on this path -- confirm downstream use.
            img_val1 = tf.zeros(self.input_size + (3,))
            img_val1.set_shape(self.input_size + (3,))
            img_val2 = tf.zeros(self.input_size + (3,))
            img_val2.set_shape(self.input_size + (3,))
            pair = tf.stack([img_val1, img_val2])
            return pair, label, tag
          imgs, labels, tags = tf.map_fn(_load_imgs, tf.range(start_idx, end_idx),
                                         dtype=(tf.float32, tf.int32, tf.string))
          # Flatten pairs into the batch axis, as in training pair mode.
          shape = smart_shape(imgs)
          shape2 = shape[1:]
          shape2[0] *= end_idx - start_idx
          imgs = tf.reshape(imgs, shape2)
          return imgs, labels, tags
        def if_not_end_net():
          # idx_placeholder[2:4] select which test image file to load.
          test_size = self.idx_placeholder[2]
          test_num = self.idx_placeholder[3]
          def _load_imgs(idx):
            label = 0
            img = path + tf.as_string(idx + 1, width=4, fill="0") + "_" + tf.as_string(test_size + test_num) + ".png"
            tag = img
            img = self.data_dir + img
            img_val = load_normalized_image_tensorflow(img, jpg=False)
            img_val.set_shape(self.input_size + (3,))
            return img_val, label, tag
          imgs, labels, tags = tf.map_fn(_load_imgs, tf.range(start_idx, end_idx),
                                         dtype=(tf.float32, tf.int32, tf.string))
          shape = smart_shape(imgs)
          imgs = tf.reshape(imgs, shape)
          return imgs, labels, tags
        imgs, labels, tags = tf.cond(end_net, if_end_net, if_not_end_net)
      ####### Embedding Validation-Mode #######
      else: # self.validation_mode in "embedding":
        path = self.test_case + '/'
        start_idx = self.idx_placeholder[0]
        end_idx = self.idx_placeholder[1]
        test_size = self.idx_placeholder[2]
        test_num = self.idx_placeholder[3]
        def _load_imgs(idx):
          # Labels are a constant 0 here; identity is carried via the tag.
          label = 0
          img = path + tf.as_string(test_size+1,width=5,fill="0") + "_" + tf.as_string(idx + 1, width=4, fill="0") + ".png"
          tag = img
          img = self.data_dir + img
          img_val = load_normalized_image_tensorflow(img, jpg=False)
          img_val.set_shape(self.input_size + (3,))
          return img_val, label, tag
        imgs, labels, tags = tf.map_fn(_load_imgs, tf.range(start_idx, end_idx),
                                       dtype=(tf.float32, tf.int32, tf.string))
        shape = smart_shape(imgs)
        imgs = tf.reshape(imgs, shape)
    tensors = {"inputs": imgs, "labels": labels, "tags": tags}
    self.images = imgs
    return tensors
  def void_label(self):
    """Label value used to mark void/ignore pixels."""
    return CUHK03_VOID_LABEL
| 38.85034 | 123 | 0.607337 |
ace98b0b6f29b60b2ac7ab55a5bed54adbd6934b | 401 | py | Python | hw8/mouse/spiralDraw.py | jonescarissa/csc221 | 1052b4cf9f3aab86c063c1b3845895a590bc2083 | [
"CC0-1.0"
] | null | null | null | hw8/mouse/spiralDraw.py | jonescarissa/csc221 | 1052b4cf9f3aab86c063c1b3845895a590bc2083 | [
"CC0-1.0"
] | null | null | null | hw8/mouse/spiralDraw.py | jonescarissa/csc221 | 1052b4cf9f3aab86c063c1b3845895a590bc2083 | [
"CC0-1.0"
] | 1 | 2021-09-02T03:55:17.000Z | 2021-09-02T03:55:17.000Z | import pyautogui
import time
# Give the user five seconds to move the mouse over the drawing canvas,
# then click to focus it before drawing.
time.sleep(5)
pyautogui.click()

# Draw an inward square spiral: each side is `change` pixels shorter than
# the last, until the side length reaches zero.
distance = 300
change = 20
while distance > 0:
    pyautogui.drag(distance, 0, duration=0.2)    # right
    distance = distance - change
    pyautogui.drag(0, distance, duration=0.2)    # down
    # Fixed: original had `duration-0.2` (a NameError), not `duration=0.2`.
    pyautogui.drag(-distance, 0, duration=0.2)   # left
    distance = distance - change
    pyautogui.drag(0, -distance, duration=0.2)   # up
ace98c52848fb20f6d423ad6d32971e854e2f18a | 3,990 | py | Python | intersight/models/asset_cluster_member_list.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 21 | 2018-03-29T14:20:35.000Z | 2021-10-13T05:11:41.000Z | intersight/models/asset_cluster_member_list.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 14 | 2018-01-30T15:45:46.000Z | 2022-02-23T14:23:21.000Z | intersight/models/asset_cluster_member_list.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 18 | 2018-01-03T15:09:56.000Z | 2021-07-16T02:21:54.000Z | # coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class AssetClusterMemberList(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    One page of assetClusterMember query results plus the total match count.

    Attributes:
        swagger_types (dict): maps attribute name to attribute type.
        attribute_map (dict): maps attribute name to the JSON key in the
            API definition.
    """
    swagger_types = {
        'count': 'int',
        'results': 'list[AssetClusterMember]'
    }
    attribute_map = {
        'count': 'Count',
        'results': 'Results'
    }

    def __init__(self, count=None, results=None):
        """AssetClusterMemberList - a model defined in Swagger."""
        self._count = None
        self._results = None
        if count is not None:
            self.count = count
        if results is not None:
            self.results = results

    @property
    def count(self):
        """int: the number of assetClusterMembers matching the request in total for all pages."""
        return self._count

    @count.setter
    def count(self, count):
        """Set the total number of assetClusterMembers matching the request."""
        self._count = count

    @property
    def results(self):
        """list[AssetClusterMember]: the array of assetClusterMembers matching the request."""
        return self._results

    @results.setter
    def results(self, results):
        """Set the array of assetClusterMembers matching the request."""
        self._results = results

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        serialized = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal state."""
        return (
            isinstance(other, AssetClusterMemberList)
            and self.__dict__ == other.__dict__
        )

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 25.909091 | 87 | 0.564662 |
ace98d5a11b411da43633c709195089ed3372fed | 3,075 | py | Python | crawler/darknet/darknetsettings.py | nesg-ugr/c4darknet | 7fd1e3c5070547c5fa577faebcc6d08864d26ea7 | [
"MIT"
] | 1 | 2020-10-06T12:13:27.000Z | 2020-10-06T12:13:27.000Z | crawler/darknet/darknetsettings.py | nesg-ugr/c4darknet | 7fd1e3c5070547c5fa577faebcc6d08864d26ea7 | [
"MIT"
] | null | null | null | crawler/darknet/darknetsettings.py | nesg-ugr/c4darknet | 7fd1e3c5070547c5fa577faebcc6d08864d26ea7 | [
"MIT"
] | 2 | 2020-06-11T10:56:23.000Z | 2020-06-20T17:06:12.000Z | # -*- coding: utf-8 -*-
# Scrapy settings for darknet project
#
# For simplicity, this file contains only settings considered important or
# commonly used. More settings and their documentation in:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
import settings
# Identity and spider discovery settings for this Scrapy project.
BOT_NAME = 'darknet'
SPIDER_MODULES = ['darknet.spiders']
NEWSPIDER_MODULE = 'darknet.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'darknet (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# Timeout and retry policy are taken from the project-wide settings module.
DOWNLOAD_TIMEOUT = settings.HTTP_TIMEOUT
RETRY_TIMES = settings.MAX_CRAWLING_ATTEMPTS_ON_ERROR
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'i2darknetp.middlewares.DarknetSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# The proxy middleware (order 200) is applied before the filter middleware (300).
DOWNLOADER_MIDDLEWARES = {
    'darknet.middlewares.DarknetProxyMiddleware': 200,
    'darknet.middlewares.DarknetFilterMiddleware': 300,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'darknet.pipelines.DarknetPipeline': 300,
}
# The maximum depth that will be allowed to crawl for any site:
DEPTH_LIMIT = 3
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
#FEED_STORAGES_BASE = {
#    '': 'darknet.exportutils.CustomFileFeedStorage',
#    'file': 'darknet.exportutils.CustomFileFeedStorage'
#}
# CUSTOM CONFIGURATION
# Project-specific paths (relative to the crawler working directory).
PATH_ONGOING_SPIDERS = "darknet/spiders/ongoing/"
PATH_FINISHED_SPIDERS = "darknet/spiders/finished/"
PATH_LOG = '../logs/'
PATH_DATA = '../data/'
| 32.03125 | 102 | 0.768455 |
ace98e67984eedd5e4b3c10dd859e21f53a11f85 | 6,493 | py | Python | salt/states/debconfmod.py | lllamnyp/salt | de112e5b362191e3708e170b7eb8e990787ad412 | [
"Apache-2.0"
] | null | null | null | salt/states/debconfmod.py | lllamnyp/salt | de112e5b362191e3708e170b7eb8e990787ad412 | [
"Apache-2.0"
] | 9 | 2021-03-31T20:25:25.000Z | 2021-07-04T05:33:46.000Z | salt/states/debconfmod.py | lllamnyp/salt | de112e5b362191e3708e170b7eb8e990787ad412 | [
"Apache-2.0"
] | null | null | null | """
Management of debconf selections
================================
:depends: - debconf-utils package
The debconfmod state module manages the enforcement of debconf selections,
this state can set those selections prior to package installation.
Available Functions
-------------------
The debconfmod state has two functions, the ``set`` and ``set_file`` functions
set
Set debconf selections from the state itself
set_file
Set debconf selections from a file
.. code-block:: yaml
nullmailer-debconf:
debconf.set:
- name: nullmailer
- data:
'shared/mailname': {'type': 'string', 'value': 'server.domain.tld'}
'nullmailer/relayhost': {'type': 'string', 'value': 'mail.domain.tld'}
ferm-debconf:
debconf.set:
- name: ferm
- data:
'ferm/enable': {'type': 'boolean', 'value': True}
.. note::
Due to how PyYAML imports nested dicts (see :ref:`here <yaml-idiosyncrasies>`),
the values in the ``data`` dict must be indented four spaces instead of two.
If you're setting debconf values that requires `dpkg-reconfigure`, you can use
the ``onchanges`` requisite to reconfigure your package:
.. code-block:: yaml
set-default-shell:
debconf.set:
- name: dash
- data:
'dash/sh': {'type': 'boolean', 'value': false}
reconfigure-dash:
cmd.run:
- name: dpkg-reconfigure -f noninteractive dash
- onchanges:
- debconf: set-default-shell
Every time the ``set-default-shell`` state changes, the ``reconfigure-dash``
state will also run.
.. note::
For boolean types, the value should be ``true`` or ``false``, not
``'true'`` or ``'false'``.
"""
# Define the module's virtual name
__virtualname__ = "debconf"
def __virtual__():
    """
    Gate loading of this state module.

    Returns the virtual module name on Debian-family systems where the
    debconf execution module is available, otherwise a (False, reason) pair.
    """
    on_debian = __grains__["os_family"] == "Debian"
    if not on_debian:
        return (False, "debconf state only runs on Debian systems")
    if "debconf.show" not in __salt__:
        # The debconf execution module must have loaded for this state to work.
        return (False, "debconf module could not be loaded")
    return __virtualname__
def set_file(name, source, template=None, context=None, defaults=None, **kwargs):
    """
    Set debconf selections from a selections file, optionally rendering the
    file through a templating engine first.

    .. code-block:: yaml

        <state_id>:
          debconf.set_file:
            - source: salt://pathto/pkg.selections.jinja2
            - template: jinja
            - context:
                some_value: "false"

    source:
        The location of the file containing the package selections
        (a ``?saltenv=...`` suffix on the URL is supported).
    template
        If this setting is applied then the named templating engine will be
        used to render the package selections file, currently jinja, mako, and
        wempy are supported
    context
        Overrides default context variables passed to the template.
    defaults
        Default context passed to the template.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # Normalize both template inputs, then validate; a bad ``context`` is
    # reported before a bad ``defaults``, matching the documented behavior.
    if context is None:
        context = {}
    if defaults is None:
        defaults = {}
    if not isinstance(context, dict):
        ret["result"] = False
        ret["comment"] = "Context must be formed as a dict"
        return ret
    if not isinstance(defaults, dict):
        ret["result"] = False
        ret["comment"] = "Defaults must be formed as a dict"
        return ret
    # Dry-run mode: report intent without touching the debconf database.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Debconf selections would have been set."
        return ret
    if template:
        applied = __salt__["debconf.set_template"](
            source, template, context, defaults, **kwargs
        )
    else:
        applied = __salt__["debconf.set_file"](source, **kwargs)
    if not applied:
        ret["result"] = False
        ret["comment"] = "Unable to set debconf selections from file."
    else:
        ret["comment"] = "Debconf selections were set."
    return ret
def set(name, data, **kwargs):
    """
    Set debconf selections
    .. code-block:: yaml
        <state_id>:
          debconf.set:
            - name: <name>
            - data:
                <question>: {'type': <type>, 'value': <value>}
                <question>: {'type': <type>, 'value': <value>}
    name:
        The package name to set answers for.
    data:
        A set of questions/answers for debconf. Note that everything under
        this must be indented twice.
        question:
            The question that is being pre-answered
        type:
            The type of question that is being asked (string, boolean, select, etc.)
        value:
            The answer to the question
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # Selections already recorded for this package; used to detect answers
    # that are unchanged so they are reported instead of re-applied.
    current = __salt__["debconf.show"](name)
    for (key, args) in data.items():
        # For debconf data, valid booleans are 'true' and 'false';
        # But str()'ing the args['value'] will result in 'True' and 'False'
        # which will be ignored and overridden by a dpkg-reconfigure.
        # So we should manually set these values to lowercase ones,
        # before any str() call is performed.
        if args["type"] == "boolean":
            args["value"] = "true" if args["value"] else "false"
        if current is not None and [key, args["type"], str(args["value"])] in current:
            # The answer already matches: append the key to the summary comment.
            if ret["comment"] == "":
                ret["comment"] = "Unchanged answers: "
            ret["comment"] += ("{} ").format(key)
        else:
            if __opts__["test"]:
                # Dry run: record what would change without touching debconf.
                ret["result"] = None
                ret["changes"][key] = ("New value: {}").format(args["value"])
            else:
                if __salt__["debconf.set"](name, key, args["type"], args["value"]):
                    if args["type"] == "password":
                        # Never echo password answers into the state output.
                        ret["changes"][key] = "(password hidden)"
                    else:
                        ret["changes"][key] = ("{}").format(args["value"])
                else:
                    ret["result"] = False
                    ret["comment"] = "Some settings failed to be applied."
                    ret["changes"][key] = "Failed to set!"
    if not ret["changes"]:
        # Nothing changed at all: replace the per-key listing with a summary.
        ret["comment"] = "All specified answers are already set"
    return ret
| 29.38009 | 86 | 0.574311 |
ace990510bb1324859779410a0c258a7aa5d1377 | 22,362 | py | Python | src/board.py | Hegemege/minesweeper-solver-python | 1d1d5f9cf02dd2f8fbdbc4d2e9a3202083bd37d0 | [
"MIT"
] | null | null | null | src/board.py | Hegemege/minesweeper-solver-python | 1d1d5f9cf02dd2f8fbdbc4d2e9a3202083bd37d0 | [
"MIT"
] | null | null | null | src/board.py | Hegemege/minesweeper-solver-python | 1d1d5f9cf02dd2f8fbdbc4d2e9a3202083bd37d0 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import List, Callable, Optional, Tuple
from dataclasses import dataclass
from enum import Enum
import random
import sys
import numpy as np
import numpy.linalg
import scipy.optimize, scipy.linalg
import math
class BoardSolver(Enum):
    """Least-squares back ends for solving the mine-probability system Ax = b."""
    # Fastest solver
    # avg 14.1ms
    # win 31.4%
    ScipyLinalgLstsq = 0
    # Best solver
    # avg 200ms
    # win 45%
    ScipyOptimizeLsqLinear = 1
    # Performs similar to ScipyLingalgLstsq but slower
    ScipySparseLinalgLsqr = 2
    # Similar performance and speed to ScipySparseLinalgLsqr
    ScipySparseLinalgLsmr = 3
class BoardState(Enum):
    """Overall game outcome; Undefined while the game is still in progress."""
    Undefined = 0
    Won = 1
    Lost = 2
class CellState(Enum):
    """Player-visible state of a single cell."""
    Closed = 0
    Opened = 1
    Flagged = 2
class CellDiscoveryState(Enum):
    """
    Defines the discovery status of a cell.
    Undefined: no additional information is known about the cell
    Reached: opened neighboring cells offer information about the cell
    Cleared: the state of the cell has been solved
    """
    Undefined = 0
    Reached = 1
    Cleared = 2
class BoardGenerationSettings:
    """Parameters controlling mine generation for a board."""

    mines: int  # number of mines to place
    seed: Optional[int]  # RNG seed; None means one will be assigned
    start_position: Optional[Tuple[int, int]]  # forced first click, or None for random
    force_start_area: Optional[bool]  # keep the area around the start mine-free

    def __init__(self, mines, seed=None, start_position=None, force_start_area=None):
        """Store the generation parameters; all but ``mines`` are optional."""
        self.force_start_area = force_start_area
        self.start_position = start_position
        self.seed = seed
        self.mines = mines
@dataclass
class Cell:
    """
    One square of the minesweeper grid.

    Holds the ground truth (``mine``), cached neighbor statistics that the
    solver maintains incrementally, and the player-visible ``state``.
    """
    __slots__ = [
        "x",
        "y",
        "mine",
        "neighbor_mine_count",
        "neighbor_flag_count",
        "neighbor_opened_count",
        "neighbor_count",
        "neighbors",
        "state",
        "discovery_state",
        "satisfied",
    ]
    x: int
    y: int
    mine: bool
    neighbor_mine_count: int
    neighbor_flag_count: int
    neighbor_opened_count: int
    neighbor_count: int
    neighbors: List[Cell]
    state: CellState
    discovery_state: CellDiscoveryState
    satisfied: bool

    def __init__(self, x, y):
        """Create a cell at grid position (x, y) in its pristine state."""
        self.x = x
        self.y = y
        self.neighbors = []
        self.reset()

    def reset(self):
        """Return the cell to its pre-game state (neighbor links are kept)."""
        self.satisfied = False
        self.discovery_state = CellDiscoveryState.Undefined
        self.state = CellState.Closed
        self.neighbor_opened_count = 0
        self.neighbor_flag_count = 0
        self.neighbor_mine_count = 0
        self.mine = False

    def update_satisfied(self):
        """Mark the cell satisfied once the conditions for it are met."""
        if self.satisfied:
            return
        flags_match_mines = self.neighbor_mine_count == self.neighbor_flag_count
        unopened_neighbors = self.neighbor_count - self.neighbor_opened_count
        if (
            self.state == CellState.Flagged
            or flags_match_mines
            or self.neighbor_mine_count == unopened_neighbors
        ):
            self.satisfied = True

    def str_real(self):
        """Render the true content: mine block, blank, or neighbor mine count."""
        if self.mine:
            return "█"
        if self.neighbor_mine_count == 0:
            return " "
        return str(self.neighbor_mine_count)

    def str_revealed(self, hide=False):
        """Render the player's view; with ``hide`` satisfied cells are blanked."""
        if self.mine and self.state == CellState.Opened:
            return "x"
        if self.state == CellState.Flagged:
            return "■"
        if hide and self.satisfied:
            return " "
        if self.state == CellState.Closed:
            return "█"
        if self.state == CellState.Opened:
            return " " if self.neighbor_mine_count == 0 else str(self.neighbor_mine_count)
class Board:
    """
    Reusable minesweeper board that generates mines and then solves itself.

    Solving combines first-order neighbor deductions with a least-squares
    solution of the linear system Ax = b built from the opened boundary
    (see solve_complex); the numeric back end is chosen via BoardSolver.
    """
    grid: List[List[Cell]]
    width: int
    height: int
    state: BoardState
    opened_cells: int
    flagged_cells: int
    generated_mines: int
    settings: BoardGenerationSettings
    unknown_cell_lookup: dict
    debug: bool
    solver: BoardSolver
    # The board is intended to be reused, no constructor required
    def __init__(self):
        """Create an empty board; configure() must run before solving."""
        self.grid = None
        self.width = 0
        self.height = 0
        self.reset()
    def reset(self):
        """Clear per-game state so the same Board instance can be reused."""
        self.state = BoardState.Undefined
        self.opened_cells = 0
        self.flagged_cells = 0
        self.generated_mines = 0
        self.settings = None
        self.unknown_cell_lookup = {}
        self.solver = None
    def configure_and_solve(
        self,
        width: int,
        height: int,
        settings: BoardGenerationSettings,
        solver=BoardSolver.ScipyLinalgLstsq,
        debug=False,
    ):
        """
        Configure and solve the board with the given settings and solver.
        The solver is used to find vector x from Ax = b
        """
        start_position = self.configure(width, height, settings, solver, debug)
        self.solve(start_position)
    def configure(
        self,
        width: int,
        height: int,
        settings: BoardGenerationSettings,
        solver=BoardSolver.ScipyLinalgLstsq,
        debug=False,
    ):
        """
        Configures the board with the given settings and generates mines.
        Returns the starting position as a Tuple[int, int]
        """
        self.reset()
        self.width = width
        self.height = height
        self.settings = settings
        self.debug = debug
        self.solver = solver
        # The grid is only rebuilt when the requested dimensions change.
        reconfigure = (
            self.grid is None or len(self.grid) != height or len(self.grid[0]) != width
        )
        # Reset the grid data if needed
        if reconfigure:
            self.grid = [[Cell(i, j) for i in range(width)] for j in range(height)]
            self.link_neighbors()
        self.reset_cells()
        start_position = self.generate_mines(settings)
        return start_position
    def solve(self, start_position):
        """
        Solves the board from its current state using the given start position that will be opened.
        """
        if self.debug:
            print("Solving with seed", self.settings.seed)
        non_mine_cell_count = self.width * self.height - self.generated_mines
        # Keep track of remaining unsatisfied/solved cells
        remaining_cells: List[Cell] = [cell for row in self.grid for cell in row]
        # Keep track of active cells and perform first-order solving
        # A cell is active if one of it's neighbors has been opened
        # or if the cell itself has been opened
        active_cells: List[Cell] = []
        # Build a lookup (x, y) -> (cell, index) for unknown cells
        # The index will be updated manually and when cells are flagged or opened, they
        # are removed from the lookup
        self.unknown_cell_lookup = {}
        for cell in remaining_cells:
            self.unknown_cell_lookup[(cell.x, cell.y)] = [cell, 0]
        # Open the start position
        self.open_at(start_position[0], start_position[1])
        # Main loop
        while self.state == BoardState.Undefined:
            # Test win condition
            if self.opened_cells == non_mine_cell_count:
                self.state = BoardState.Won
                break
            # Update remaining and active cells
            remaining_cells = [cell for cell in remaining_cells if not cell.satisfied]
            active_cells = [
                cell
                for cell in remaining_cells
                if cell.neighbor_opened_count > 0 or cell.state == CellState.Opened
            ]
            # Loop through active cells and attempt first-order solving
            # If no cells were changed, perform second-order solving
            # for active cells only.
            # If no cells were changed after second-order solving for
            # active cells, attempt second-order solving for all cells
            # and perform epsilon tests and find least probable cell to
            # contain a mine for a random guess if needed
            solved_active = False
            # print(len(active_cells))
            for cell in active_cells:
                cell_flag_satisfied = (
                    cell.neighbor_mine_count == cell.neighbor_flag_count
                )
                cell_flag_remaining = (
                    cell.neighbor_mine_count
                    == cell.neighbor_count - cell.neighbor_opened_count
                )
                # If an opened cell has been satisfied, open remaining neighboring unflagged cells
                if cell_flag_satisfied and cell.state == CellState.Opened:
                    solved_active = True
                    for neighbor in cell.neighbors:
                        if neighbor.state == CellState.Closed:
                            self.open_cell(neighbor)
                    cell.update_satisfied()
                # If an opened cell has the same number of unopened squares
                # as the neighboring mine count, flag all neighbors
                if cell_flag_remaining and cell.state == CellState.Opened:
                    solved_active = True
                    for neighbor in cell.neighbors:
                        if neighbor.state == CellState.Closed:
                            self.flag_cell(neighbor)
                    cell.update_satisfied()
            # Do not perform second-order solving if first-order solving is sufficient
            if solved_active:
                continue
            solved_active = self.solve_complex(active_cells)
            if solved_active:
                continue
            self.solve_complex(remaining_cells, True, True)
    def solve_complex(self, cells: List[Cell], include_total=False, guess=False):
        """
        Form the required matrix and vector to solve
        Ax = b
        Matrix A [m*n] has columns for each active unopened cell and
        rows for each active opened cell with 1s where the row's cell is
        adjacent to the column's cell and otherwise 0
        Vector B [1*n] has the remaining unflagged mine count for each opened cell
        (row) of the matrix
        Vector X [m*1] will have values indicating the existence of mines
        If include_total is True, add a row [1, 1,..., 1]: mines_left
        to the matrix to get some probability value for every closed cell
        """
        # Update the unknown lookup with proper indices
        # Rows can be added to the matrix in the order they are in active_cells
        solved_active = False
        unknown_index = 0
        known_count = 0
        for cell in cells:
            if cell.state == CellState.Closed:
                self.unknown_cell_lookup[(cell.x, cell.y)][1] = unknown_index
                unknown_index += 1
            if cell.state == CellState.Opened:
                known_count += 1
        unknown_count = unknown_index
        # If the unknown count or known count is 0, there are pockets of
        # cells that are not reachable from current boundary
        # Without adding the total row, this is impossible to solve
        # If this was reached during the first solve_complex, pass the execution to the next
        if not include_total and (unknown_count == 0 or known_count == 0):
            return False
        # unknown_index is now the count of unknowns
        A_matrix = [[0 for i in range(unknown_count)] for j in range(known_count)]
        B_vector = [
            cell.neighbor_mine_count - cell.neighbor_flag_count
            for cell in cells
            if cell.state == CellState.Opened
        ]
        # Write values to the A matrix
        known_index = 0
        for cell in cells:
            if cell.state != CellState.Opened:
                continue
            for neighbor in cell.neighbors:
                if neighbor.state != CellState.Closed:
                    continue
                key = (neighbor.x, neighbor.y)
                unknown_index = self.unknown_cell_lookup[key][1]
                row_index = known_index
                A_matrix[row_index][unknown_index] = 1
            known_index += 1
        # Add the 1 1 1 ... 1 = remaining_mines row at the bottom
        if include_total:
            B_vector.append(self.generated_mines - self.flagged_cells)
            A_matrix.append([1 for i in range(unknown_count)])
        # Different attempts at libraries for solving Ax = b
        # Find a least-squres solution to the equation
        # X_vector, residuals, rank, singular_values = numpy.linalg.lstsq(
        #     A_matrix, B_vector, rcond=None
        # )
        # Find a non-negative least-squares solution to the equation
        # X_vector, residual = scipy.optimize.nnls(A_matrix, B_vector)
        # PROBLEM: Returns only 0's and 1's, not anything in between
        # -> reports uncertain cells as mines or non-mines
        if self.solver == BoardSolver.ScipyLinalgLstsq:
            # Find a least-squares solution to the equation
            X_vector, residuals, rank, singular_values = scipy.linalg.lstsq(
                A_matrix, B_vector, check_finite=False
            )
        elif self.solver == BoardSolver.ScipyOptimizeLsqLinear:
            # Find a least-squares solution with constraints
            # method="trf" (default) gets stuck in infinite loop with default lsq_solver
            # method="bvls" gets weird errors:
            # ValueError: zero-size array to reduction operation maximum which has no identity
            optimize_result = scipy.optimize.lsq_linear(
                A_matrix, B_vector, bounds=(0.0, 1.0), method="trf", lsq_solver="lsmr"
            )
            X_vector = optimize_result.x
        elif self.solver == BoardSolver.ScipySparseLinalgLsqr:
            # https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lsqr.html
            # TODO: Attempt better performance by setting initial guess x0
            X_vector = scipy.sparse.linalg.lsqr(
                np.array(A_matrix), np.array(B_vector), show=False
            )[0]
        elif self.solver == BoardSolver.ScipySparseLinalgLsmr:
            # https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lsmr.html
            X_vector = scipy.sparse.linalg.lsmr(
                np.array(A_matrix), np.array(B_vector), show=False
            )[0]
        else:
            print("No solver configured")
            exit()
        # Clean the data
        for index, value in enumerate(X_vector):
            # If the value is close to 0 or truly negative
            if abs(value) < 0.0001:
                X_vector[index] = 0
            if abs(value - 1) < 0.0001:
                X_vector[index] = 1
        # Find sure mines to flag or cells to open
        # Find the least probable cell for guessing, if needed
        least_probable_cell = None
        least_probability = math.inf
        for cell in cells:
            if cell.state != CellState.Closed:
                continue
            unknown_index = self.unknown_cell_lookup[(cell.x, cell.y)][1]
            if X_vector[unknown_index] == 1:
                solved_active = True
                self.flag_cell(cell)
            elif X_vector[unknown_index] == 0:
                solved_active = True
                self.open_cell(cell)
            # Find a smallest valid probability (> 0)
            if (
                least_probability > X_vector[unknown_index]
                and X_vector[unknown_index] > 0
            ):
                least_probability = X_vector[unknown_index]
                least_probable_cell = cell
                # print(cell.x, cell.y, X_vector[unknown_index])
        # Last resort, pick the least probable cell in X_vector to open
        if not solved_active and guess:
            self.open_cell(least_probable_cell)
            if self.debug:
                if least_probable_cell.mine:
                    print("Guessed wrong with probability", least_probability)
                    print("Remaining mines", self.generated_mines - self.flagged_cells)
                    print("X_vector:")
                    for cell in cells:
                        if cell.state == CellState.Closed or (
                            cell.state == CellState.Opened and cell.mine
                        ):
                            unknown_index = self.unknown_cell_lookup[(cell.x, cell.y)][
                                1
                            ]
                            print(cell.x, cell.y, X_vector[unknown_index])
                else:
                    print("Guessed right with probability", least_probability)
                    print(least_probable_cell.x, least_probable_cell.y)
                print()
                print(self.str_revealed())
                print()
        return solved_active
    def flag_at(self, x, y):
        """Flag the cell at grid coordinates (x, y)."""
        cell = self.grid[y][x]
        self.flag_cell(cell)
    def flag_cell(self, cell):
        """Flag a closed cell, updating neighbor flag counts and bookkeeping."""
        if cell.state != CellState.Closed:
            return
        # print("Flag", cell.x, cell.y, cell.neighbor_mine_count, cell.mine)
        cell.state = CellState.Flagged
        for neighbor in cell.neighbors:
            neighbor.neighbor_flag_count += 1
        del self.unknown_cell_lookup[(cell.x, cell.y)]
        self.flagged_cells += 1
        cell.update_satisfied()
    def open_at(self, x, y):
        """Open the cell at grid coordinates (x, y)."""
        cell = self.grid[y][x]
        self.open_cell(cell)
    def open_cell(self, cell):
        """
        Open a closed cell; loses the game if it is a mine, otherwise
        cascades quick-opens/flags through its neighbors where deducible.
        """
        if cell.state != CellState.Closed:
            return
        # print("Open", cell.x, cell.y, cell.neighbor_mine_count, cell.mine)
        cell.state = CellState.Opened
        self.opened_cells += 1
        # Test lose condition
        if cell.mine:
            if self.debug:
                print("Opened mine at", cell.x, cell.y)
            self.state = BoardState.Lost
            return
        del self.unknown_cell_lookup[(cell.x, cell.y)]
        cell_flag_satisfied = cell.neighbor_mine_count == cell.neighbor_flag_count
        cell_flag_remaining = (
            cell.neighbor_mine_count == cell.neighbor_count - cell.neighbor_opened_count
        )
        # Inform neighbors that the cell has been opened
        # Also perform quick-opens and flags for neighbors
        # since we are already looping through them here
        for neighbor in cell.neighbors:
            neighbor.neighbor_opened_count += 1
            # Opening a cell that is fully satisfied opens neighbors
            if cell_flag_satisfied and neighbor.state == CellState.Closed:
                self.open_cell(neighbor)
            # Opening a cell that only has N mines around it and only
            # N unopened cells remaining flags them all
            if cell_flag_remaining and neighbor.state == CellState.Closed:
                self.flag_cell(neighbor)
        cell.update_satisfied()
    def link_neighbors(self) -> None:
        """Populate each cell's neighbor list (up to 8 adjacent cells)."""
        for y, row in enumerate(self.grid):
            for x, cell in enumerate(row):
                if y > 0:
                    cell.neighbors.append(self.grid[y - 1][x])
                    if x > 0:
                        cell.neighbors.append(self.grid[y - 1][x - 1])
                    if x < self.width - 1:
                        cell.neighbors.append(self.grid[y - 1][x + 1])
                if x > 0:
                    cell.neighbors.append(self.grid[y][x - 1])
                if x < self.width - 1:
                    cell.neighbors.append(self.grid[y][x + 1])
                if y < self.height - 1:
                    cell.neighbors.append(self.grid[y + 1][x])
                    if x > 0:
                        cell.neighbors.append(self.grid[y + 1][x - 1])
                    if x < self.width - 1:
                        cell.neighbors.append(self.grid[y + 1][x + 1])
                cell.neighbor_count = len(cell.neighbors)
    def reset_cells(self) -> None:
        """Reset every cell's per-game state (neighbor links are kept)."""
        for row in self.grid:
            for cell in row:
                cell.reset()
    def generate_mines(self, settings: BoardGenerationSettings) -> Tuple[int, int]:
        """
        Place mines per the settings and return the chosen start position.
        (Return annotation corrected from None: the start position is returned.)
        """
        # Seeds the RNG from settings. If None, assign a seed
        # since the current seed cannot be retrieved from random
        if settings.seed is None:
            settings.seed = random.randrange(sys.maxsize)
        random.seed(settings.seed)
        if settings.start_position is not None:
            start_position = settings.start_position
        else:
            start_position = (
                random.randrange(0, self.width),
                random.randrange(0, self.height),
            )
        # Generate a list of all random positions
        valid_positions: List[Tuple[int, int]] = []
        for j in range(self.height):
            for i in range(self.width):
                # Do not generate a mine on the start position
                if i == start_position[0] and j == start_position[1]:
                    continue
                # Do not generate mines neighboring the start position
                # if the force_start_area setting is enabled
                if settings.force_start_area:
                    if (
                        i >= start_position[0] - 1
                        and i <= start_position[0] + 1
                        and j >= start_position[1] - 1
                        and j <= start_position[1] + 1
                    ):
                        continue
                valid_positions.append((i, j))
        mine_count = min(settings.mines, len(valid_positions))
        mine_positions = random.sample(valid_positions, mine_count)
        for position in mine_positions:
            x, y = position
            self.grid[y][x].mine = True
            self.generated_mines += 1
            for neighbor in self.grid[y][x].neighbors:
                neighbor.neighbor_mine_count += 1
        return start_position
    def str_real(self):
        """Render the full board with true contents, one row per line."""
        return "\n".join(
            ["".join([cell.str_real() for cell in row]) for row in self.grid]
        )
    def str_revealed(self, hide=False):
        """Render the board as the player sees it, one row per line."""
        return "\n".join(
            ["".join([cell.str_revealed(hide) for cell in row]) for row in self.grid]
        )
    def get_result(self):
        """Return a BoardResult summary of this board's dimensions and outcome."""
        return BoardResult(self.width, self.height, self.generated_mines, self.state)
@dataclass
class BoardResult:
    """Summary of a finished game: board dimensions, mine count, and outcome."""
    __slots__ = ["width", "height", "mines", "state"]
    width: int
    height: int
    mines: int
    state: BoardState
| 33.984802 | 103 | 0.575709 |
ace9909e47ce950503e9b9ca1ae2e322fd602ece | 11,467 | py | Python | tests/test_goodvibes_helper.py | team-mayes/nrel_tools | 551f92f2c5448e7888bb2fb11bd04243b26da4a9 | [
"MIT"
] | 1 | 2021-05-26T15:29:45.000Z | 2021-05-26T15:29:45.000Z | tests/test_goodvibes_helper.py | team-mayes/nrel_tools | 551f92f2c5448e7888bb2fb11bd04243b26da4a9 | [
"MIT"
] | 2 | 2020-08-12T17:05:01.000Z | 2021-05-30T00:32:29.000Z | tests/test_goodvibes_helper.py | team-mayes/nrel_tools | 551f92f2c5448e7888bb2fb11bd04243b26da4a9 | [
"MIT"
] | null | null | null | import unittest
import os
from gaussian_wrangler.goodvibes_helper import main
from common_wrangler.common import silent_remove, capture_stdout, capture_stderr, diff_lines
import logging
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# When debug logging is enabled, silent_remove calls are disabled so that
# generated output files are kept for inspection.
DISABLE_REMOVE = logger.isEnabledFor(logging.DEBUG)
__author__ = 'hmayes'
# Directories used to locate test input data and temporary output.
TEST_DIR = os.path.dirname(__file__)
MAIN_DIR = os.path.dirname(TEST_DIR)
DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
SUB_DATA_DIR = os.path.join(DATA_DIR, 'goodvibes_helper')
TEMP_DIR = os.path.join(SUB_DATA_DIR, 'temp_dir')
# Output files a run may create; removed in the tests' cleanup (finally) blocks.
GOODVIBES_DAT = os.path.abspath(os.path.join(TEST_DIR, '..', 'Goodvibes_output.dat'))
GOODVIBES_CSV = os.path.abspath(os.path.join(TEST_DIR, '..', 'Goodvibes_output.csv'))
# Input files, file lists, and expected ("good") outputs for the tests below.
UNI_REACT = os.path.join(SUB_DATA_DIR, 'ipaegh1dts_t_ircr_opt.log')
UNI_TS = os.path.join(SUB_DATA_DIR, 'ipaegh1dts.log')
FILE_LIST = os.path.join(SUB_DATA_DIR, 'list.txt')
FILE_LIST_MISMATCH_SOLV = os.path.join(SUB_DATA_DIR, 'list_mismatch_solv.txt')
FILE_LIST_MISMATCH_STOICH = os.path.join(SUB_DATA_DIR, 'list_mismatch_stoich.txt')
AE_OUT_BASE_NAME = 'aea_out.csv'
AE_OUT = os.path.join(SUB_DATA_DIR, AE_OUT_BASE_NAME)
AE_OUT_SUB_DIR = os.path.join(TEMP_DIR, AE_OUT_BASE_NAME)
GOOD_AE_OUT = os.path.join(SUB_DATA_DIR, 'aea_out_good.csv')
BI_LIST = os.path.join(SUB_DATA_DIR, 'list_bimolec.txt')
GOOD_AE_BI_OUT = os.path.join(SUB_DATA_DIR, 'aea_out_bi_good.csv')
BI_VIBES_OUT1 = os.path.join(SUB_DATA_DIR, 'ethygly2_tzvp_vibes.dat')
BI_VIBES_OUT2 = os.path.join(SUB_DATA_DIR, 'pdc2_h_vibes.dat')
BI_VIBES_OUT3 = os.path.join(SUB_DATA_DIR, 'pdc2_eghtsct_vibes.dat')
TI_LIST = os.path.join(SUB_DATA_DIR, 'list_ti.txt')
TI_OUT = os.path.join(SUB_DATA_DIR, 'list_ti.csv')
GOOD_AE_TI_OUT = os.path.join(SUB_DATA_DIR, 'aea_out_ti_good.csv')
AEA_VIBES_OUT = os.path.join(SUB_DATA_DIR, 'aea_out_vibes.dat')
AEA_PROD_VIBES_OUT = os.path.join(SUB_DATA_DIR, 'aea_prod_vibes.dat')
TPA_LIST = os.path.join(SUB_DATA_DIR, 'tpa_testing.txt')
TPA_OUT = os.path.join(SUB_DATA_DIR, 'tpa_testing.csv')
TPA_VIBES_OUT = os.path.join(SUB_DATA_DIR, 'tpa_testing_vibes.dat')
GOOD_TPA_OUT = os.path.join(SUB_DATA_DIR, 'aea_out_tpa_good.csv')
GOOD_TPA_SCALED_OUT = os.path.join(SUB_DATA_DIR, 'tpa_testing_good.csv')
TEST_LOG = os.path.join(SUB_DATA_DIR, 'tpaegh1ats_ts.log')
PROD_LIST = os.path.join(SUB_DATA_DIR, 'list_prod.txt')
PROD_OUT = os.path.join(SUB_DATA_DIR, 'aea_prod.csv')
GOOD_PROD_OUT = os.path.join(SUB_DATA_DIR, 'aea_prod_good.csv')
GOOD_CO_PROD_OUT = os.path.join(SUB_DATA_DIR, 'aea_prod_w_co_good.csv')
PROD_NO_TS_LIST = os.path.join(SUB_DATA_DIR, 'list_prod_no_ts.txt')
GOOD_PROD_NO_TS_OUT = os.path.join(SUB_DATA_DIR, 'aea_prod_no_ts_good.csv')
PLOT_LIST = os.path.join(SUB_DATA_DIR, 'list_plot.txt')
# Plot image paths.
PLOT1 = os.path.join(SUB_DATA_DIR, 'aea_out_g.png')
PLOT2 = os.path.join(SUB_DATA_DIR, 'aea_out_g_qh.png')
PLOT3 = os.path.join(SUB_DATA_DIR, 'aea_out_h.png')
PLOT4 = os.path.join(SUB_DATA_DIR, 'aea_out_h_qh.png')
MISSING_PROD_LIST = os.path.join(SUB_DATA_DIR, 'list_missing_one_prod.txt')
MULT_TS_LIST = os.path.join(SUB_DATA_DIR, 'list_mult_ts.txt')
LIST_W_CO = os.path.join(SUB_DATA_DIR, 'list_with_1_freq.txt')
class TestGoodVibesHelperNoOut(unittest.TestCase):
# These all test failure cases
def testNoArgs(self):
test_input = []
# main(test_input)
with capture_stderr(main, test_input) as output:
self.assertTrue("No files" in output)
def testHelp(self):
test_input = ['-h']
# main(test_input)
if logger.isEnabledFor(logging.DEBUG):
main(test_input)
with capture_stderr(main, test_input) as output:
self.assertFalse(output)
with capture_stdout(main, test_input) as output:
self.assertTrue("optional arguments" in output)
def testNoneFloatVib(self):
test_input = ["-l", TPA_LIST, "-d", SUB_DATA_DIR, "-t", "-v", "ghost"]
# main(test_input)
if logger.isEnabledFor(logging.DEBUG):
main(test_input)
with capture_stderr(main, test_input) as output:
self.assertTrue("not convert string" in output)
with capture_stdout(main, test_input) as output:
self.assertTrue("optional arguments" in output)
class TestGoodVibesHelperInputError(unittest.TestCase):
def testNoSuchFile(self):
test_input = ["ghost.log"]
try:
# main(test_input)
with capture_stderr(main, test_input) as output:
self.assertTrue("Problems reading file" in output)
finally:
silent_remove(GOODVIBES_CSV, disable=DISABLE_REMOVE)
pass
def testMisMatchStoich(self):
test_input = ["-l", FILE_LIST_MISMATCH_STOICH]
try:
main(test_input)
with capture_stderr(main, test_input) as output:
self.assertTrue("Check stoichiometries" in output)
finally:
silent_remove(GOODVIBES_CSV, disable=DISABLE_REMOVE)
pass
def testMixMatchSolvent(self):
test_input = ["-l", FILE_LIST_MISMATCH_SOLV]
try:
# main(test_input)
with capture_stderr(main, test_input) as output:
print("Output: ", output)
self.assertTrue("Different solvents" in output)
finally:
silent_remove(GOODVIBES_CSV, disable=DISABLE_REMOVE)
pass
def testMixMatchTheory(self):
test_input = [UNI_REACT, UNI_TS]
try:
# main(test_input)
with capture_stderr(main, test_input) as output:
self.assertTrue("Different basis sets" in output)
finally:
silent_remove(GOODVIBES_CSV, disable=DISABLE_REMOVE)
pass
def testMissingProd(self):
test_input = ["-l", MISSING_PROD_LIST]
with capture_stderr(main, test_input) as output:
self.assertTrue("Check stoichiometries of reactant(s) and product(s)" in output)
def testMultTS(self):
test_input = ["-l", MULT_TS_LIST]
# main(test_input)
with capture_stderr(main, test_input) as output:
self.assertTrue("Unexpectedly found an imaginary frequency" in output)
class TestGoodVibesHelper(unittest.TestCase):
# These test/demonstrate different options
def testTwoUni(self):
test_input = ["-l", FILE_LIST, "-d", TEMP_DIR, "-q", "-o", AE_OUT_BASE_NAME, "-f", "100"]
try:
silent_remove(TEMP_DIR, dir_with_files=True)
main(test_input)
self.assertFalse(diff_lines(AE_OUT_SUB_DIR, GOOD_AE_OUT))
finally:
silent_remove(TEMP_DIR, disable=DISABLE_REMOVE, dir_with_files=True)
pass
def testBimolecular(self):
# checks a bimolecular reaction and also saving GoodVibes output for each file
test_input = ["-l", BI_LIST, "-d", SUB_DATA_DIR, "-s", "-o", AE_OUT, "-f", "100"]
# make sure files not left from a previous run
for fname in [BI_VIBES_OUT1, BI_VIBES_OUT2, BI_VIBES_OUT3]:
silent_remove(fname)
try:
main(test_input)
self.assertFalse(diff_lines(AE_OUT, GOOD_AE_BI_OUT))
for fname in [BI_VIBES_OUT1, BI_VIBES_OUT2, BI_VIBES_OUT3]:
self.assertTrue(os.path.exists(fname))
finally:
for fname in [BI_VIBES_OUT1, BI_VIBES_OUT2, BI_VIBES_OUT3]:
silent_remove(fname, disable=DISABLE_REMOVE)
silent_remove(AE_OUT, disable=DISABLE_REMOVE)
pass
def testTi(self):
# check handles it when not all atoms in are in all molecules
# also checks saving GoodVibes output together
test_input = ["-l", TI_LIST, "-d", SUB_DATA_DIR, "-t", "-o", AE_OUT]
silent_remove(AEA_VIBES_OUT)
try:
main(test_input)
self.assertFalse(diff_lines(AE_OUT, GOOD_AE_TI_OUT))
self.assertTrue(os.path.exists(AEA_VIBES_OUT))
finally:
silent_remove(AE_OUT, disable=DISABLE_REMOVE)
silent_remove(AEA_VIBES_OUT, disable=DISABLE_REMOVE)
pass
def testTPA(self):
# check handles it when not all atoms in are in all molecules
# also checks saving GoodVibes output together
test_input = ["-l", TPA_LIST, "-d", SUB_DATA_DIR, "-t", "-f", "100"]
try:
main(test_input)
self.assertFalse(diff_lines(TPA_OUT, GOOD_TPA_OUT))
finally:
silent_remove(TPA_OUT, disable=DISABLE_REMOVE)
silent_remove(TPA_VIBES_OUT, disable=DISABLE_REMOVE)
pass
def testTPAAltVib(self):
test_input = ["-l", TPA_LIST, "-d", SUB_DATA_DIR, "-t", "-v", "0.984", "-f", "100"]
try:
main(test_input)
self.assertFalse(diff_lines(TPA_OUT, GOOD_TPA_SCALED_OUT))
finally:
silent_remove(TPA_OUT, disable=DISABLE_REMOVE)
silent_remove(TPA_VIBES_OUT, disable=DISABLE_REMOVE)
pass
def testReactTSProd(self):
# check handles it when not all atoms in are in all molecules
# also checks saving GoodVibes output together
test_input = ["-l", PROD_LIST, "-d", SUB_DATA_DIR, "-o", "aea_prod.csv", "-t",
"-ti", "300,600,25", "--temp", "500", "-f", "100"]
try:
main(test_input)
self.assertFalse(diff_lines(PROD_OUT, GOOD_PROD_OUT))
finally:
silent_remove(PROD_OUT, disable=DISABLE_REMOVE)
silent_remove(AEA_PROD_VIBES_OUT, disable=DISABLE_REMOVE)
pass
def testMoleculeWithOneFreq(self):
test_input = ["-l", LIST_W_CO, "-d", SUB_DATA_DIR, "-o", "aea_prod.csv", "-t",
"-ti", "688.15,888.15,25", "--temp", "788.15", "-f", "0", "-v", "1.0"]
try:
main(test_input)
self.assertFalse(diff_lines(PROD_OUT, GOOD_CO_PROD_OUT))
finally:
silent_remove(PROD_OUT, disable=DISABLE_REMOVE)
silent_remove(AEA_PROD_VIBES_OUT, disable=DISABLE_REMOVE)
pass
def testReactProd(self):
# check handles it when not all atom types in are in all molecules
# also checks saving GoodVibes output together
test_input = ["-l", PROD_NO_TS_LIST, "-d", SUB_DATA_DIR, "-o", "aea_prod.csv",
"-ti", "300.15,600.15,25", "--temp", "500.15", "-t", "-f", "100"]
try:
main(test_input)
self.assertFalse(diff_lines(PROD_OUT, GOOD_PROD_NO_TS_OUT))
finally:
silent_remove(AEA_PROD_VIBES_OUT, disable=DISABLE_REMOVE)
silent_remove(PROD_OUT, disable=DISABLE_REMOVE)
pass
def testPlot(self):
# check handles it when not all atoms in are in all molecules
# also checks saving GoodVibes output together
plot_list = [PLOT1, PLOT2, PLOT3, PLOT4]
for fname in plot_list:
silent_remove(fname)
test_input = ["-l", PLOT_LIST, "-d", SUB_DATA_DIR, "-p", "-pl", "pdc2,ipa", "-o", AE_OUT,
"-ti", "400,500,25", "--temp", "500", "-q", "-f", "100"]
try:
main(test_input)
for fname in plot_list:
self.assertTrue(os.path.exists(fname))
finally:
silent_remove(AE_OUT, disable=DISABLE_REMOVE)
for fname in plot_list:
silent_remove(fname)
pass
| 41.547101 | 97 | 0.655359 |
ace9927f96f7d5c3e911be8c89c64367b05e6fce | 732 | py | Python | source/catalog/forms.py | nick-provost/PrimeDelivery | 08b606abd5ff3431884ee1abb259fba7148b4321 | [
"BSD-3-Clause"
] | null | null | null | source/catalog/forms.py | nick-provost/PrimeDelivery | 08b606abd5ff3431884ee1abb259fba7148b4321 | [
"BSD-3-Clause"
] | null | null | null | source/catalog/forms.py | nick-provost/PrimeDelivery | 08b606abd5ff3431884ee1abb259fba7148b4321 | [
"BSD-3-Clause"
] | null | null | null | from django import forms
from .models import *
class AddToCatalogForm(forms.Form):
class Meta:
model = SponsorCatalogItem
fields = ['catalog_item']
class AddToCartForm(forms.Form):
class Meta:
model = SponsorCatalogItem
fields = ['catalog_item']
class DeleteFromCatalogForm(forms.Form):
class Meta:
model = SponsorCatalogItem
fields = ['catalog_item']
class RemoveFromCartForm(forms.Form):
class Meta:
model = CartItem
fields = ['item']
class ViewCatalogAsAdminForm(forms.Form):
class Meta:
model = SponsorOrganization
fields = ['sponsor']
sponsor = forms.ModelChoiceField(queryset=SponsorOrganization.objects.all())
| 24.4 | 80 | 0.669399 |
ace993c57978d18567918cc475d165d5fbee28e3 | 409 | py | Python | flask_potholes/insert_values_in_db.py | mihaeladimovska1/KnoxvilleHackathon | 8496ba03041e3ed36bc111e94e35e5c5913c841f | [
"MIT"
] | null | null | null | flask_potholes/insert_values_in_db.py | mihaeladimovska1/KnoxvilleHackathon | 8496ba03041e3ed36bc111e94e35e5c5913c841f | [
"MIT"
] | null | null | null | flask_potholes/insert_values_in_db.py | mihaeladimovska1/KnoxvilleHackathon | 8496ba03041e3ed36bc111e94e35e5c5913c841f | [
"MIT"
] | null | null | null | from flask_pot import db
from flask_pot.models import Potholes
import datetime
#db.create_all()
potholes_locations = ['1700 Cumberland Ave., Knoxville']
for pl in potholes_locations:
pothole = Potholes(location=pl, size=0.5, depth=0.3, serviced=0, date_created=datetime.datetime.now().date()- datetime.timedelta(days=2))
db.session.add(pothole)
db.session.commit()
print(Potholes.query.all())
| 29.214286 | 142 | 0.753056 |
ace993d9911e971e5a7223b8b7c0690ceb5b708e | 3,802 | py | Python | contrib/macdeploy/custom_dsstore.py | ozmode/BitcoinFlex | 1e342de2bec36bf92a22480da2f9ed80500db67f | [
"MIT"
] | 3 | 2021-03-17T20:18:29.000Z | 2021-08-09T13:43:28.000Z | contrib/macdeploy/custom_dsstore.py | ozmode/BitcoinFlex | 1e342de2bec36bf92a22480da2f9ed80500db67f | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | ozmode/BitcoinFlex | 1e342de2bec36bf92a22480da2f9ed80500db67f | [
"MIT"
] | 3 | 2020-06-01T10:47:49.000Z | 2021-04-16T03:02:32.000Z | #!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinflexuser:\x00Documents:\x00bitcoinflex:\x00bitcoinflex:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinflexuser/Documents/bitcoinflex/bitcoinflex/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['BitcoinFlex-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 62.327869 | 1,817 | 0.729353 |
ace994dc586521ea7d522fe54175a637c28992a2 | 3,236 | py | Python | caldera/tests/test_planner_complexity.py | wietze/caldera | 1fc20cffc64a73c4ae277df677db433781b2abde | [
"Apache-2.0",
"CC0-1.0"
] | 3 | 2019-08-01T14:58:33.000Z | 2021-01-11T09:27:23.000Z | caldera/tests/test_planner_complexity.py | wietze/caldera | 1fc20cffc64a73c4ae277df677db433781b2abde | [
"Apache-2.0",
"CC0-1.0"
] | 3 | 2021-03-20T05:17:06.000Z | 2021-06-02T03:19:53.000Z | caldera/tests/test_planner_complexity.py | wietze/caldera | 1fc20cffc64a73c4ae277df677db433781b2abde | [
"Apache-2.0",
"CC0-1.0"
] | 1 | 2019-02-25T18:21:34.000Z | 2019-02-25T18:21:34.000Z | import unittest
import time
import logging
import cProfile as profile
from caldera.app.logic.planner import PlannerContext, eval_plan
from caldera.app.logic.logic import Term
from caldera.app.operation.operation_steps import all_steps
from caldera.app.operation.operation import _database_objs, OPShare
from caldera.app.util import relative_path
from caldera.app.logic.pydatalog_logic import DatalogContext as LogicContext
logging.basicConfig(level=logging.DEBUG)
class TestPlanner(unittest.TestCase):
def __init__(self, *args, **kwargs):
with open(relative_path(__file__, '10_host_dataset.txt')) as f:
dataset = f.readlines()
self.dataset = []
for fact in sorted(dataset, key=lambda x: x.count(',')):
term = Term(fact.strip())
term.literals = list(True if x == "True" else False if x == "False" else x for x in term.literals)
self.dataset.append(term)
super().__init__(*args, **kwargs)
def setUp(self):
self.context = PlannerContext(LogicContext())
def tearDown(self):
self.context.close()
def test_main(self):
# pr = profile.Profile()
# pr.disable()
# this code converts all of the steps in all_steps
for step in all_steps:
if step.__name__ in ["GetComputers", "WMI_remote_pc", "Copy", "Credentials", "GetAdmin", "GetDomain",
"HKLMRunKeyPersist", "NetUse"]:
self.context.add_step(step)
# load types into the planner
for obj in _database_objs:
primary = obj != OPShare
self.context.define_type(obj.__name__, primary=primary)
for fact in self.dataset:
self.context.assert_fact(fact)
format_str = "{!s:<5} {!s:<5} {!s:<5} {!s:<15} {}"
plan_length = 2
t1 = time.process_time()
#pr.enable()
plans = self.context.plan(None, plan_length)
#pr.disable()
t2 = time.process_time()
print("Planning time took: {} seconds".format(t2 - t1))
#pr.dump_stats('profile.pstat')
countnone = 0
for item in plans:
if not item:
countnone += 1
assert countnone == 0
if len(plans) == 0:
print("len == 0")
print("")
print("Ran out of plans, dumping facts:")
self.context.print_dump()
else:
best_plan_action = self.context.actions[plans[0][0][0]]
best_plan_parameters = plans[0][0][1]
best_plan_score = eval_plan(plans[0])
print(format_str.format("plans", 'facts', "score", "action", "parameters"))
print(format_str.format(str(len(plans))[:5], str(len(self.context.facts))[:5],
str(best_plan_score)[:5], best_plan_action.name[:15], best_plan_parameters))
self.print_plan(plans[0])
def print_plan(self, plan):
def print_action(action, parameters):
print("{}{}".format(action.name, parameters))
for step in plan:
action = self.context.actions[step[0]]
parameters = step[1]
print_action(action, parameters)
print(" -> ")
| 33.708333 | 113 | 0.598578 |
ace995065f8768e4e246510a85f17604d4a960be | 561 | py | Python | barlink/setup.py | TkkrLab/barsystem | 17d138f19c8f6a61b14477f034d8519bb83e00fb | [
"MIT"
] | 1 | 2016-03-28T16:19:53.000Z | 2016-03-28T16:19:53.000Z | barlink/setup.py | TkkrLab/barsystem | 17d138f19c8f6a61b14477f034d8519bb83e00fb | [
"MIT"
] | 11 | 2015-09-22T20:34:34.000Z | 2017-04-12T13:55:27.000Z | barlink/setup.py | TkkrLab/barsystem | 17d138f19c8f6a61b14477f034d8519bb83e00fb | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='barlink',
version='1.0.0',
packages=find_packages('src'),
package_dir={'': 'src'},
entry_points={
'console_scripts': [
'barlink = barlink.websocket:main',
]
},
install_requires=[
'pyserial',
],
license='MIT',
description='Compact WebSocket server for barsystem',
long_description=open('README.md').read(),
url='https://github.com/TkkrLab/barsystem',
author='Jasper Seidel',
author_email='code@jawsper.nl',
)
| 21.576923 | 57 | 0.611408 |
ace99544b9fcd4f6a51afe06a77010b61640963b | 1,003 | py | Python | src2D/visualise.py | Naman-ntc/3D-HourGlass-Network | e58b7b6a78d35bc14fe4c0bc611f80022b2f409b | [
"MIT"
] | 53 | 2018-10-28T20:07:16.000Z | 2021-12-17T02:25:57.000Z | src2D/visualise.py | Naman-ntc/3D-HourGlass-Network | e58b7b6a78d35bc14fe4c0bc611f80022b2f409b | [
"MIT"
] | 3 | 2019-01-07T14:01:39.000Z | 2019-05-07T12:01:44.000Z | src2D/visualise.py | Naman-ntc/3D-HourGlass-Network | e58b7b6a78d35bc14fe4c0bc611f80022b2f409b | [
"MIT"
] | 9 | 2018-10-28T22:31:29.000Z | 2021-10-14T02:54:27.000Z | # -*- coding: utf-8 -*-
import os
import sys
from utils.pyTools import Show3d
import ref
import numpy as np
def visualise3d(pred,gt,epoch,iter,frindex,frame, opt):
pred_root_rel = pred[:,:3] - pred[ref.root,:3]
# gt_root_rel = gt[:,:3] - gt[ref.root, :3]
gt_length=0
len_pred=0
tot_cnt=0
for e in ref.edges:
if pred_root_rel[e[0]][0]!=0 and pred_root_rel[e[0]][1]!=0 and pred_root_rel[e[1]][1]!=0 and pred_root_rel[e[1]][1]!=0:
len_pred += ((pred_root_rel[e[0]][:2] - pred_root_rel[e[1]][:2]) ** 2).sum() ** 0.5
gt_length += ((gt[e[0]][:2] - gt[e[1]][:2]) ** 2).sum() ** 0.5
else:
tot_cnt=tot_cnt+1
gt_root = gt[ref.root]
for j in range(ref.nJoints):
pred_root_rel[j] = ((pred_root_rel[j]) / len_pred) * gt_length + gt_root
data={}
data['joint']=pred_root_rel
data['gt']=gt
os.system("mkdir -p Plots/mp%d"%(iter))
Show3d(data,'./Plots/mp%d/'%(iter), epoch, frindex, frame, opt)
| 30.393939 | 127 | 0.576271 |
ace995d28870eb552cc0e74f27b796d4ae9bea65 | 640 | py | Python | a-practical-introduction-to-python-programming-brian-heinold/chapter-05/exercise-05.py | elarabyelaidy19/awesome-reading | 5c01a4272ba58e4f7ea665aab14b4c0aa252ea89 | [
"MIT"
] | 31 | 2021-11-02T19:51:13.000Z | 2022-02-17T10:55:26.000Z | a-practical-introduction-to-python-programming-brian-heinold/chapter-05/exercise-05.py | MosTafaHoSamm/awesome-reading | 469408fefc049d78ed53a2b2331b5d5cecdc6c06 | [
"MIT"
] | 1 | 2022-01-18T12:27:54.000Z | 2022-01-18T12:27:54.000Z | a-practical-introduction-to-python-programming-brian-heinold/chapter-05/exercise-05.py | MosTafaHoSamm/awesome-reading | 469408fefc049d78ed53a2b2331b5d5cecdc6c06 | [
"MIT"
] | 3 | 2022-01-11T05:01:34.000Z | 2022-02-05T14:36:29.000Z | # 5. Write a program that asks the user to enter a number and prints the sum of the divisors of
# that number. The sum of the divisors of a number is an important function in number theory.
import math
number = eval(input('Enter a number: '))
sum = 0
# Efficient algorithm. Time Complexity: O(sqrt(n)).
# See: awesome-reading/a-practical-introduction-to-python-programming-brian-heinold/chapter-04/exercise-09.py
root = math.floor(math.sqrt(number))
for i in range(1, root + 1):
if number % i == 0:
sum += i
if (number // i) != i:
sum += number // i
print('The sum of the divisors of', number, 'is', sum)
| 35.555556 | 109 | 0.673438 |
ace995dbbb746142140da6da734dec61ea03dac8 | 5,333 | py | Python | IPsPrincipal.py | DrumSergio/statix | bd11a3c16d39b7273ccb44312aa44b34d13a177f | [
"MIT"
] | 1 | 2021-04-19T22:30:11.000Z | 2021-04-19T22:30:11.000Z | IPsPrincipal.py | DrumSergio/statix | bd11a3c16d39b7273ccb44312aa44b34d13a177f | [
"MIT"
] | null | null | null | IPsPrincipal.py | DrumSergio/statix | bd11a3c16d39b7273ccb44312aa44b34d13a177f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys, posix, time, binascii, socket, select
import hashlib, time
class ApiRos:
"Routeros api"
def __init__(self, sk):
self.sk = sk
self.currenttag = 0
def login(self, username, pwd):
for repl, attrs in self.talk(["/login"]):
chal = binascii.unhexlify((attrs['=ret']).encode('UTF-8'))
md = hashlib.md5()
md.update(b'\x00')
md.update(pwd.encode('UTF-8'))
md.update(chal)
self.talk(["/login", "=name=" + username,
"=response=00" + binascii.hexlify(md.digest()).decode('UTF-8') ])
def talk(self, words):
if self.writeSentence(words) == 0: return
r = []
while 1:
i = self.readSentence();
if len(i) == 0: continue
reply = i[0]
attrs = {}
for w in i[1:]:
j = w.find('=', 1)
if (j == -1):
attrs[w] = ''
else:
attrs[w[:j]] = w[j+1:]
r.append((reply, attrs))
if reply == '!done': return r
def writeSentence(self, words):
ret = 0
for w in words:
self.writeWord(w)
ret += 1
self.writeWord('')
return ret
def readSentence(self):
r = []
while 1:
w = self.readWord()
if w == '': return r
r.append(w)
def writeWord(self, w):
print(("<<< " + w))
self.writeLen(len(w))
self.writeStr(w)
def readWord(self):
ret = self.readStr(self.readLen())
print((">>> " + ret))
return ret
def writeLen(self, l):
if l < 0x80:
self.writeStr(chr(l))
elif l < 0x4000:
l |= 0x8000
self.writeStr(chr((l >> 8) & 0xFF))
self.writeStr(chr(l & 0xFF))
elif l < 0x200000:
l |= 0xC00000
self.writeStr(chr((l >> 16) & 0xFF))
self.writeStr(chr((l >> 8) & 0xFF))
self.writeStr(chr(l & 0xFF))
elif l < 0x10000000:
l |= 0xE0000000
self.writeStr(chr((l >> 24) & 0xFF))
self.writeStr(chr((l >> 16) & 0xFF))
self.writeStr(chr((l >> 8) & 0xFF))
self.writeStr(chr(l & 0xFF))
else:
self.writeStr(chr(0xF0))
self.writeStr(chr((l >> 24) & 0xFF))
self.writeStr(chr((l >> 16) & 0xFF))
self.writeStr(chr((l >> 8) & 0xFF))
self.writeStr(chr(l & 0xFF))
def readLen(self):
c = ord(self.readStr(1))
if (c & 0x80) == 0x00:
pass
elif (c & 0xC0) == 0x80:
c &= ~0xC0
c <<= 8
c += ord(self.readStr(1))
elif (c & 0xE0) == 0xC0:
c &= ~0xE0
c <<= 8
c += ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
elif (c & 0xF0) == 0xE0:
c &= ~0xF0
c <<= 8
c += ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
elif (c & 0xF8) == 0xF0:
c = ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
c <<= 8
c += ord(self.readStr(1))
return c
def writeStr(self, str):
n = 0;
while n < len(str):
r = self.sk.send(bytes(str[n:], 'UTF-8'))
if r == 0: raise RuntimeError("connection closed by remote end")
n += r
def readStr(self, length):
ret = ''
while len(ret) < length:
s = self.sk.recv(length - len(ret))
if s == '': raise RuntimeError("connection closed by remote end")
ret += s.decode('UTF-8', 'replace')
return ret
def main():
s = None
for res in socket.getaddrinfo("x.x.x.x", "8728", socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except (socket.error, msg):
s = None
continue
try:
s.connect(sa)
except (socket.error, msg):
s.close()
s = None
continue
break
if s is None:
print ('could not open socket')
sys.exit(1)
apiros = ApiRos(s);
apiros.login("admin", "PASSWORD");
inputsentence = ['/interface/eoip/print']
apiros.writeSentence(inputsentence)
t_end = time.time() + 4
while time.time() < t_end:
r = select.select([s], [], [], 0.0)
if s in r[0]:
# something to read in socket, read sentence
x = apiros.readSentence()
if __name__ == '__main__':
main()
| 31.187135 | 91 | 0.413463 |
ace9992cba9bfbee5b453007b7f5701da8b334a4 | 1,104 | py | Python | aristotle/apps/marc/urls.py | jermnelson/Discover-Aristotle | cc1ff79915d715801890a3a8642099304916adfa | [
"Apache-2.0"
] | 7 | 2015-03-13T09:56:16.000Z | 2021-05-03T13:39:05.000Z | aristotle/apps/marc/urls.py | jermnelson/Discover-Aristotle | cc1ff79915d715801890a3a8642099304916adfa | [
"Apache-2.0"
] | 1 | 2021-04-06T16:30:00.000Z | 2021-04-06T16:43:57.000Z | aristotle/apps/marc/urls.py | jermnelson/Discover-Aristotle | cc1ff79915d715801890a3a8642099304916adfa | [
"Apache-2.0"
] | 2 | 2015-12-18T16:51:07.000Z | 2016-02-26T09:56:42.000Z | """
urls.py - URL routing for MARC record utilities django app
"""
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright: 2011 Colorado College
#
__author__ = 'Jeremy Nelson'
import marc.views
from django.conf.urls.defaults import *
urlpatterns = patterns('marc.views',
url(r'^$','default',name='marc-index'),
(r'process$','process'),
url(r'search$','search',name='marc-search'),
url(r'download$','download',name='marc-download'),
url(r'update$','update_log',name='marc-update'),
(r'(\w+)','record_load'),
# (r'/success$','success'),
# (r'(\w+)/upload','upload'),
)
| 33.454545 | 74 | 0.696558 |
ace9993876f689d47927a55b6202d72ccaf049e4 | 18,974 | py | Python | tests/system/requests/test_download.py | HemangChothani/google-resumable-media-python | 7dc40de34533e4474240fc831b79cee2baa82c6e | [
"Apache-2.0"
] | null | null | null | tests/system/requests/test_download.py | HemangChothani/google-resumable-media-python | 7dc40de34533e4474240fc831b79cee2baa82c6e | [
"Apache-2.0"
] | 3 | 2019-07-07T17:55:56.000Z | 2019-08-05T01:13:27.000Z | tests/system/requests/test_download.py | HemangChothani/google-resumable-media-python | 7dc40de34533e4474240fc831b79cee2baa82c6e | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import hashlib
import io
import os
import google.auth
import google.auth.transport.requests as tr_requests
import pytest
from google.resumable_media import common
from six.moves import http_client
from google import resumable_media
import google.resumable_media.requests as resumable_requests
import google.resumable_media.requests.download as download_mod
from tests.system import utils
# Location of the shared fixture files checked into the repository.
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = os.path.join(CURR_DIR, u'..', u'..', u'data')
# Content types used when uploading the fixture files.
PLAIN_TEXT = u'text/plain'
IMAGE_JPEG = u'image/jpeg'
# One entry per fixture blob uploaded by the ``add_files`` fixture:
#   path: local file whose bytes are uploaded
#   uncompressed (optional): file holding the bytes expected back from a
#       download when the uploaded copy is gzip-encoded
#   content_type: MIME type sent with the upload
#   checksum: expected base64-encoded hash of the downloaded payload
#       (compared against the object's hash header in the corruption test)
#   slices: byte-ranges exercised by range-download tests (empty tuple
#       means no range downloads for that file)
#   metadata (optional): extra object metadata; its presence routes the
#       upload through the multipart API in ``add_files``
ALL_FILES = (
    {
        u'path': os.path.realpath(os.path.join(DATA_DIR, u'image1.jpg')),
        u'content_type': IMAGE_JPEG,
        u'checksum': u'1bsd83IYNug8hd+V1ING3Q==',
        u'slices': (
            slice(1024, 16386, None),  # obj[1024:16386]
            slice(None, 8192, None),  # obj[:8192]
            slice(-256, None, None),  # obj[-256:]
            slice(262144, None, None),  # obj[262144:]
        ),
    }, {
        u'path': os.path.realpath(os.path.join(DATA_DIR, u'image2.jpg')),
        u'content_type': IMAGE_JPEG,
        u'checksum': u'gdLXJltiYAMP9WZZFEQI1Q==',
        u'slices': (
            slice(1024, 16386, None),  # obj[1024:16386]
            slice(None, 8192, None),  # obj[:8192]
            slice(-256, None, None),  # obj[-256:]
            slice(262144, None, None),  # obj[262144:]
        ),
    }, {
        u'path': os.path.realpath(os.path.join(DATA_DIR, u'file.txt')),
        u'content_type': PLAIN_TEXT,
        u'checksum': u'KHRs/+ZSrc/FuuR4qz/PZQ==',
        u'slices': (),
    }, {
        u'path': os.path.realpath(os.path.join(DATA_DIR, u'gzipped.txt.gz')),
        u'uncompressed':
            os.path.realpath(os.path.join(DATA_DIR, u'gzipped.txt')),
        u'content_type': PLAIN_TEXT,
        u'checksum': u'KHRs/+ZSrc/FuuR4qz/PZQ==',
        u'slices': (),
        u'metadata': {
            u'contentEncoding': u'gzip',
        },
    },
)
# Fragments of server error messages the tests assert against.
ENCRYPTED_ERR = (
    b'The target object is encrypted by a customer-supplied encryption key.')
# Message raised by ``requests`` when ``response.content`` is accessed after
# the body was streamed elsewhere.
NO_BODY_ERR = (
    u'The content for this response was already consumed')
NOT_FOUND_ERR = (
    b'No such object: ' +
    utils.BUCKET_NAME.encode('utf-8') +
    b'/does-not-exist.txt'
)
class CorruptingAuthorizedSession(tr_requests.AuthorizedSession):
    """Authorized Requests session that tampers with response checksums.

    Every response has its hash header overwritten with the MD5 of an
    empty payload, so downloads through this session always fail
    checksum validation.  This is used for testing checksum validation.

    Args:
        credentials (google.auth.credentials.Credentials): The credentials to
            add to the request.
        refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
            that credentials should be refreshed and the request should be
            retried.
        max_refresh_attempts (int): The maximum number of times to attempt to
            refresh the credentials and retry the request.
        kwargs: Additional arguments passed to the :class:`requests.Session`
            constructor.
    """

    # base64-encoded MD5 digest of the empty byte string.
    EMPTY_HASH = base64.b64encode(
        hashlib.md5(b'').digest()).decode(u'utf-8')

    def request(self, method, url, data=None, headers=None, **kwargs):
        """Delegate to the parent session, then corrupt the hash header."""
        result = super(CorruptingAuthorizedSession, self).request(
            method, url, data=data, headers=headers, **kwargs)
        result.headers[download_mod._HASH_HEADER] = (
            u'md5={}'.format(self.EMPTY_HASH))
        return result
# Transport that returns corrupt data, so we can exercise checksum handling.
@pytest.fixture(scope=u'module')
def corrupting_transport():
    """Module-scoped session whose responses always fail MD5 validation."""
    creds, _ = google.auth.default(scopes=(utils.GCS_RW_SCOPE,))
    yield CorruptingAuthorizedSession(creds)
def delete_blob(transport, blob_name):
    """Delete ``blob_name`` from the test bucket, asserting success."""
    url = utils.METADATA_URL_TEMPLATE.format(blob_name=blob_name)
    delete_response = transport.delete(url)
    assert delete_response.status_code == http_client.NO_CONTENT
def _get_contents_for_upload(info):
    """Read the raw (possibly compressed) bytes of the fixture file."""
    with open(info[u'path'], u'rb') as handle:
        return handle.read()
def _get_contents(info):
    """Read the uncompressed bytes a download is expected to produce."""
    # Gzip-encoded fixtures carry an ``uncompressed`` companion file.
    source_path = info.get(u'uncompressed', info[u'path'])
    with open(source_path, u'rb') as handle:
        return handle.read()
def _get_blob_name(info):
    """Derive the object name from the fixture's (uncompressed) path."""
    return os.path.basename(info.get(u'uncompressed', info[u'path']))
@pytest.fixture(scope=u'module')
def add_files(authorized_transport, bucket):
    """Upload every fixture blob before the tests; delete them afterwards."""
    uploaded_names = []
    for info in ALL_FILES:
        payload = _get_contents_for_upload(info)
        blob_name = _get_blob_name(info)
        uploaded_names.append(blob_name)
        if u'metadata' in info:
            # Extra metadata (e.g. ``contentEncoding``) requires the
            # multipart API so it can accompany the payload.
            metadata = copy.deepcopy(info[u'metadata'])
            metadata[u'name'] = blob_name
            upload = resumable_requests.MultipartUpload(utils.MULTIPART_UPLOAD)
            response = upload.transmit(
                authorized_transport, payload,
                metadata, info[u'content_type'])
        else:
            upload = resumable_requests.SimpleUpload(
                utils.SIMPLE_UPLOAD_TEMPLATE.format(blob_name=blob_name))
            response = upload.transmit(
                authorized_transport, payload, info[u'content_type'])
        assert response.status_code == http_client.OK

    yield

    # Clean-up the blobs we created.
    for blob_name in uploaded_names:
        delete_blob(authorized_transport, blob_name)
def check_tombstoned(download, transport):
    """Verify ``download`` is finished and refuses to be used again."""
    assert download.finished
    # Single-shot and chunked downloads raise different messages from
    # different re-use entry points.
    if isinstance(download, resumable_requests.Download):
        retry, expected = download.consume, u'A download can only be used once.'
    else:
        retry, expected = download.consume_next_chunk, u'Download has finished.'
    with pytest.raises(ValueError) as exc_info:
        retry(transport)
    assert exc_info.match(expected)
def test_download_full(add_files, authorized_transport):
    """Each uploaded file can be downloaded whole and matches the on-disk copy."""
    for info in ALL_FILES:
        blob_name = _get_blob_name(info)
        expected = _get_contents(info)
        # Download the object in a single request.
        url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
        download = resumable_requests.Download(url)
        response = download.consume(authorized_transport)
        assert response.status_code == http_client.OK
        assert response.content == expected
        check_tombstoned(download, authorized_transport)
def test_download_to_stream(add_files, authorized_transport):
    """Download each file into a caller-supplied stream.

    When a stream is passed, the body must land in the stream only and
    ``response.content`` must raise.  NOTE(review): the checks on
    ``_content`` / ``_content_consumed`` poke at ``requests`` private
    internals and are sensitive to the installed ``requests`` version.
    """
    for info in ALL_FILES:
        actual_contents = _get_contents(info)
        blob_name = _get_blob_name(info)
        # Create the actual download object.
        media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
        stream = io.BytesIO()
        download = resumable_requests.Download(media_url, stream=stream)
        # Consume the resource.
        response = download.consume(authorized_transport)
        assert response.status_code == http_client.OK
        # Accessing the body of a streamed-out response must fail.
        with pytest.raises(RuntimeError) as exc_info:
            getattr(response, u'content')
        assert exc_info.value.args == (NO_BODY_ERR,)
        assert response._content is False
        assert response._content_consumed is True
        assert stream.getvalue() == actual_contents
        check_tombstoned(download, authorized_transport)
@pytest.mark.xfail # See: #76
def test_corrupt_download(add_files, corrupting_transport):
    """A checksum mismatch must surface as ``common.DataCorruption``."""
    for info in ALL_FILES:
        blob_name = _get_blob_name(info)
        # Create the actual download object.
        media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
        stream = io.BytesIO()
        download = resumable_requests.Download(media_url, stream=stream)
        # Consume the resource.
        with pytest.raises(common.DataCorruption) as exc_info:
            download.consume(corrupting_transport)
        # The download itself completes; only checksum verification fails.
        assert download.finished
        # Expected message embeds the hash the corrupting transport yields
        # and the checksum recorded for this fixture file.
        msg = download_mod._CHECKSUM_MISMATCH.format(
            download.media_url, CorruptingAuthorizedSession.EMPTY_HASH,
            info[u'checksum'])
        assert exc_info.value.args == (msg,)
@pytest.fixture(scope=u'module')
def secret_file(authorized_transport, bucket):
    """Upload a customer-encrypted blob; yields ``(name, data, headers)``."""
    blob_name = u'super-seekrit.txt'
    data = b'Please do not tell anyone my encrypted seekrit.'
    headers = utils.get_encryption_headers()
    upload_url = utils.SIMPLE_UPLOAD_TEMPLATE.format(blob_name=blob_name)
    upload = resumable_requests.SimpleUpload(upload_url, headers=headers)
    response = upload.transmit(authorized_transport, data, PLAIN_TEXT)
    assert response.status_code == http_client.OK
    yield blob_name, data, headers
    delete_blob(authorized_transport, blob_name)
def check_error_response(exc_info, status_code, message):
    """Assert an ``InvalidResponse`` carries the expected status and message."""
    error = exc_info.value
    response = error.response
    assert response.status_code == status_code
    assert response.content.startswith(message)
    assert len(error.args) == 5
    assert error.args[1] == status_code
    expected_statuses = (http_client.OK, http_client.PARTIAL_CONTENT)
    assert (error.args[3], error.args[4]) == expected_statuses
def test_extra_headers(authorized_transport, secret_file):
    """Encrypted blobs download only when the encryption headers are sent."""
    blob_name, data, headers = secret_file
    media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
    # With the headers the download succeeds.
    with_headers = resumable_requests.Download(media_url, headers=headers)
    response = with_headers.consume(authorized_transport)
    assert response.status_code == http_client.OK
    assert response.content == data
    check_tombstoned(with_headers, authorized_transport)
    # Without the headers the server rejects the request outright.
    without_headers = resumable_requests.Download(media_url)
    with pytest.raises(resumable_media.InvalidResponse) as exc_info:
        without_headers.consume(authorized_transport)
    check_error_response(exc_info, http_client.BAD_REQUEST, ENCRYPTED_ERR)
    check_tombstoned(without_headers, authorized_transport)
def test_non_existent_file(authorized_transport, bucket):
    """Downloading a missing object raises ``InvalidResponse`` with a 404."""
    media_url = utils.DOWNLOAD_URL_TEMPLATE.format(
        blob_name=u'does-not-exist.txt')
    download = resumable_requests.Download(media_url)
    with pytest.raises(resumable_media.InvalidResponse) as exc_info:
        download.consume(authorized_transport)
    check_error_response(exc_info, http_client.NOT_FOUND, NOT_FOUND_ERR)
    check_tombstoned(download, authorized_transport)
@pytest.fixture(scope=u'module')
def simple_file(authorized_transport, bucket):
    """Upload a small plain-text blob; yields ``(blob_name, data)``."""
    blob_name = u'basic-file.txt'
    data = b'Simple contents'
    upload_url = utils.SIMPLE_UPLOAD_TEMPLATE.format(blob_name=blob_name)
    upload = resumable_requests.SimpleUpload(upload_url)
    response = upload.transmit(authorized_transport, data, PLAIN_TEXT)
    assert response.status_code == http_client.OK
    yield blob_name, data
    delete_blob(authorized_transport, blob_name)
def test_bad_range(simple_file, authorized_transport):
    """A byte range past the end of the object yields a 416 response."""
    blob_name, data = simple_file
    # Pick a range that starts beyond the object's last byte.
    start, end = 32, 63
    assert len(data) < start < end
    media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
    download = resumable_requests.Download(media_url, start=start, end=end)
    with pytest.raises(resumable_media.InvalidResponse) as exc_info:
        download.consume(authorized_transport)
    check_error_response(
        exc_info, http_client.REQUESTED_RANGE_NOT_SATISFIABLE,
        b'Request range not satisfiable')
    check_tombstoned(download, authorized_transport)
def _download_slice(media_url, slice_):
    """Build a ``Download`` covering the byte range described by ``slice_``.

    Python slices exclude the stop index while HTTP byte ranges are
    inclusive, hence the ``stop - 1`` adjustment.  Stepped slices are not
    supported.
    """
    assert slice_.step is None
    if slice_.stop is None:
        end = None
    else:
        end = slice_.stop - 1
    return resumable_requests.Download(
        media_url, start=slice_.start, end=end)
def test_download_partial(add_files, authorized_transport):
    """Ranged downloads return exactly the sliced file contents."""
    for info in ALL_FILES:
        blob_name = _get_blob_name(info)
        expected = _get_contents(info)
        media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
        for slice_ in info[u'slices']:
            download = _download_slice(media_url, slice_)
            response = download.consume(authorized_transport)
            assert response.status_code == http_client.PARTIAL_CONTENT
            assert response.content == expected[slice_]
            # A second consume must fail: downloads are single use.
            with pytest.raises(ValueError):
                download.consume(authorized_transport)
def get_chunk_size(min_chunks, total_bytes):
    """Pick ``(num_chunks, chunk_size)`` so the final chunk is strictly smaller.

    Starting from ``min_chunks``, find the smallest chunk count that does
    NOT evenly divide ``total_bytes``; rounding the per-chunk size up then
    guarantees the last chunk is a short one.
    """
    num_chunks = min_chunks
    while total_bytes % num_chunks == 0:
        num_chunks += 1
    # The division above has a nonzero remainder, so +1 rounds up.
    chunk_size = 1 + total_bytes // num_chunks
    assert total_bytes < num_chunks * chunk_size
    return num_chunks, chunk_size
def consume_chunks(download, authorized_transport,
                   total_bytes, actual_contents):
    """Drive a chunked download to completion, checking every chunk.

    Returns ``(num_responses, last_response)`` so callers can verify the
    request count and inspect the final (short) chunk.
    """
    start_byte = download.start
    end_byte = download.end
    if end_byte is None:
        # An open-ended download runs to the last byte of the object.
        end_byte = total_bytes - 1
    num_responses = 0
    while not download.finished:
        response = download.consume_next_chunk(authorized_transport)
        num_responses += 1
        # Next chunk boundary, capped at the (inclusive) end byte.
        next_byte = min(start_byte + download.chunk_size, end_byte + 1)
        assert download.bytes_downloaded == next_byte - download.start
        assert download.total_bytes == total_bytes
        assert response.status_code == http_client.PARTIAL_CONTENT
        assert response.content == actual_contents[start_byte:next_byte]
        start_byte = next_byte
    return num_responses, response
@pytest.mark.xfail # See issue #56
def test_chunked_download(add_files, authorized_transport):
    """Download each file in chunks and verify the reassembled bytes."""
    for info in ALL_FILES:
        expected = _get_contents(info)
        blob_name = _get_blob_name(info)
        total_bytes = len(expected)
        num_chunks, chunk_size = get_chunk_size(7, total_bytes)
        # Stream the object chunk-by-chunk into a local buffer.
        media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
        stream = io.BytesIO()
        download = resumable_requests.ChunkedDownload(
            media_url, chunk_size, stream)
        num_responses, last_response = consume_chunks(
            download, authorized_transport, total_bytes, expected)
        # The reassembled stream equals the whole object.
        assert stream.getvalue() == expected
        # Response count matches the chunking plan, and the final chunk is
        # guaranteed short by get_chunk_size().
        assert num_responses == num_chunks
        assert total_bytes % chunk_size != 0
        assert len(last_response.content) < chunk_size
        check_tombstoned(download, authorized_transport)
def test_chunked_download_partial(add_files, authorized_transport):
    """Chunked downloads over byte ranges reproduce each tested slice.

    Only non-negative slice starts are exercised because chunked
    downloads do not accept negative indices.
    """
    for info in ALL_FILES:
        actual_contents = _get_contents(info)
        blob_name = _get_blob_name(info)
        media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
        for slice_ in info[u'slices']:
            # Manually replace a missing start with 0.
            start = 0 if slice_.start is None else slice_.start
            # Chunked downloads don't support a negative index.
            if start < 0:
                continue
            # First determine how much content is in the slice and
            # use it to determine a chunking strategy.
            total_bytes = len(actual_contents)
            if slice_.stop is None:
                end_byte = total_bytes - 1
                end = None
            else:
                # Python slices DO NOT include the last index, though a byte
                # range **is** inclusive of both endpoints.
                end_byte = slice_.stop - 1
                end = end_byte
            num_chunks, chunk_size = get_chunk_size(
                7, end_byte - start + 1)
            # Create the actual download object.
            stream = io.BytesIO()
            download = resumable_requests.ChunkedDownload(
                media_url, chunk_size, stream, start=start, end=end)
            # Consume the resource in chunks.
            num_responses, last_response = consume_chunks(
                download, authorized_transport, total_bytes, actual_contents)
            # Make sure the combined chunks are the whole slice.
            assert stream.getvalue() == actual_contents[slice_]
            # Check that we have the right number of responses.
            assert num_responses == num_chunks
            # Make sure the last chunk isn't the same size.
            assert len(last_response.content) < chunk_size
            check_tombstoned(download, authorized_transport)
def test_chunked_with_extra_headers(authorized_transport, secret_file):
    """Chunked downloads of an encrypted blob require the encryption headers."""
    blob_name, data, headers = secret_file
    num_chunks, chunk_size = 4, 12
    # The data must span all four chunks, with a short final chunk.
    assert (num_chunks - 1) * chunk_size < len(data) < num_chunks * chunk_size
    media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
    # With headers: the full object streams down chunk-by-chunk.
    stream = io.BytesIO()
    download = resumable_requests.ChunkedDownload(
        media_url, chunk_size, stream, headers=headers)
    num_responses, last_response = consume_chunks(
        download, authorized_transport, len(data), data)
    assert stream.getvalue() == data
    assert num_responses == num_chunks
    assert len(last_response.content) < chunk_size
    check_tombstoned(download, authorized_transport)
    # Without headers: the very first chunk is rejected and nothing is
    # written to the stream.
    stream_wo = io.BytesIO()
    download_wo = resumable_requests.ChunkedDownload(
        media_url, chunk_size, stream_wo)
    with pytest.raises(resumable_media.InvalidResponse) as exc_info:
        download_wo.consume_next_chunk(authorized_transport)
    assert stream_wo.tell() == 0
    check_error_response(exc_info, http_client.BAD_REQUEST, ENCRYPTED_ERR)
    assert download_wo.invalid
| 38.100402 | 79 | 0.69279 |
ace999e0a6b806389ce310f48e52d43c3230943c | 6,884 | py | Python | casa/M100_Band3_12m_Imaging.py | teuben/sd2018 | 2209b3b6ad6a0f693745fed525ef611e9a6ca128 | [
"MIT"
] | 1 | 2018-03-16T10:08:04.000Z | 2018-03-16T10:08:04.000Z | casa/M100_Band3_12m_Imaging.py | teuben/sd2018 | 2209b3b6ad6a0f693745fed525ef611e9a6ca128 | [
"MIT"
] | null | null | null | casa/M100_Band3_12m_Imaging.py | teuben/sd2018 | 2209b3b6ad6a0f693745fed525ef611e9a6ca128 | [
"MIT"
] | null | null | null | # Imaging script for M100, 12m data
# Tested in CASA version 4.3.0 revision 31966
#--------------------------------------------------------------------------------------#
# Data Preparation #
# -------------------------------------------------------------------------------------#
# Comment this step out if you are starting from the calibrated data
# M100_Band3_12m_CalibratedData.ms downloaded from the Science Portal
# Split off the M100 calibrated data
split (vis='uid___A002_X273b43_X146.ms.split.cal', field='M100', outputvis='M100_X146.ms.cal', datacolumn='data',keepflags=F)
split (vis='uid___A002_X2a5c2f_X220.ms.split.cal', field='M100', outputvis='M100_X220.ms.cal', datacolumn='data',keepflags=F)
split (vis='uid___A002_X2a5c2f_X54.ms.split.cal', field='M100', outputvis='M100_X54.ms.cal', datacolumn='data',keepflags=F)
# Combine all the ms into one
concat(vis=['M100_X146.ms.cal', 'M100_X220.ms.cal', 'M100_X54.ms.cal'],concatvis='M100_Band3_12m_CalibratedData.ms')
# From this point on you can proceed from M100_Band3_12m_CalibratedData.ms
#--------------------------------------------------------------------------------------#
# Imaging #
#--------------------------------------------------------------------------------------#
##################################################
# Check CASA version
version = casadef.casa_version
print "You are using " + version
if (version < '4.3.0'):
print "YOUR VERSION OF CASA IS TOO OLD FOR THIS GUIDE."
print "PLEASE UPDATE IT BEFORE PROCEEDING."
else:
print "Your version of CASA is appropriate for this guide."
##################################################
# Identify Line-free SPWs and channels
finalvis='M100_Band3_12m_CalibratedData.ms'
# Use plotms to identify line and continuum spectral windows
plotms(vis=finalvis, xaxis='channel', yaxis='amplitude',
ydatacolumn='data',
avgtime='1e8', avgscan=True, avgchannel='2',
iteraxis='spw' )
##################################################
# Create an Averaged Continuum MS
# Average channels within spws
contspws='1,2,3' # from plotms output
contvis='M100_12m_cont.ms'
os.system('rm -rf ' + contvis)
split(vis=finalvis,
spw=contspws,
outputvis=contvis,
width=[3840,3840,3840], # number of channels to average together. change to appropriate value for each spectral window.
datacolumn='data')
# these observations contain continuum-only SPWs: 1,2,3 (line is in SPW 0)
#############################################
# Imaging the Continuuum
contimagename = 'M100_12m_cont'
# Remove stale imaging products from any previous clean run.
for ext in ['.flux','.image','.mask','.model','.pbcor','.psf','.residual']:
    os.system('rm -rf ' + contimagename + ext)
# Image the continuum with the CASA clean task.
# NOTE(fix): the original call was missing the comma after ``field``,
# which is a syntax error inside the argument list.
clean(vis=contvis,
      imagename=contimagename,
      field='1~47', # science fields
      phasecenter='J2000 12h22m54.9 +15d49m15',
      mode='mfs',
      psfmode='clark',
      imsize = [200,200], # size of image in pixels
      cell= '0.5arcsec', # cell size for imaging
      weighting = 'briggs',
      robust = 0.5,
      niter = 1000,
      threshold = '0.0mJy',
      mask=[70,90,118,130],
      interactive = False,
      imagermode = 'mosaic')
# RMS ~ 0.12 mJy in a 3.7"x2.5" beam
########################################
# Continuum Subtraction for Line Imaging
fitspw = '0:200~1500;2500~3700,1~3:100~3700' # line-free channel for fitting continuum
linespw = '0' # line spectral windows.
uvcontsub(vis=finalvis,
spw=linespw, # spw to do continuum subtraction on
fitspw=fitspw,
combine='',
solint='int',
fitorder=1,
want_cont=False)
##############################################
# Image line emission
linevis = finalvis+'.contsub'
lineimagename = 'M100_12m_CO' # name of line image
# If you do not wish to use the provided mask, comment out the line: "mask=''"
for ext in ['.flux','.image','.mask','.model','.pbcor','.psf','.residual','.flux.pbcoverage']:
os.system('rm -rf ' + lineimagename + ext)
clean(vis=linevis,
imagename=lineimagename,
field='1~47', # science fields
spw='0:1500~2500',
phasecenter='J2000 12h22m54.9 +15d49m15',
mode='velocity',
start='1400km/s', # start velocity
width='5km/s', # velocity width
nchan= 70, # number of channels
outframe='LSRK', # velocity reference frame
veltype='radio', # velocity type
restfreq='115.271201800GHz', # rest frequency of primary line of interest
niter=10000,
threshold='0.015Jy',
interactive=True,
minpb=0.2,
cell='0.5arcsec',
imsize=800,
weighting='briggs',
robust=0.5,
imagermode='mosaic',
mask='M100_12m_CO_demo.mask')
# M100_12m_CO.image (and other files) created -- use .mask file to reproduce my masking by hand
# rms in line free channel (width 5 km/s): 11-12 mJy/beam
##############################################
# Apply a primary beam correction
impbcor(imagename='M100_12m_CO.image', pbimage='M100_12m_CO.flux', outfile='M100_12m_CO.pbcor')
##############################################
# Make moment maps of the CO(1-0) emission
immoments(imagename = 'M100_12m_CO.image',
moments = [0],
axis = 'spectral',chans = '9~60',
includepix = [0.02,100.],
outfile = 'M100_12m_CO.image.mom0')
immoments(imagename = 'M100_12m_CO.image',
moments = [1],
axis = 'spectral',chans = '9~60',
includepix = [0.05,100.],
outfile = 'M100_12m_CO.image.mom1')
immoments(imagename = 'M100_12m_CO.pbcor',
moments = [0],
axis = 'spectral',chans = '9~60',
includepix = [0.02,100.],
outfile = 'M100_12m_CO.image.pbcor.mom0')
# Make some png plots
imview (raster=[{'file': 'M100_12m_CO.image.mom0',
'range': [-0.3,25.],'scaling': -1.0,'colorwedge': T}],
zoom={'blc': [190,150],'trc': [650,610]},
out='M100_12m_CO.image.mom0.png')
imview (raster=[{'file': 'M100_12m_CO.image.mom1',
'range': [1455,1695],'colorwedge': T}],
zoom={'blc': [190,150],'trc': [650,610]},
out='M100_12m_CO.image.mom1.png')
##############################################
# Export the images
exportfits(imagename='M100_12m_CO.image', fitsimage='M100_12m_CO.image.fits')
exportfits(imagename='M100_12m_CO.flux', fitsimage='M100_12m_CO.flux.fits')
exportfits(imagename='M100_12m_CO.pbcor', fitsimage='M100_12m_CO.pbcor.fits')
exportfits(imagename='M100_12m_CO.image.mom0', fitsimage='M100_12m_CO.image.mom0.fits')
exportfits(imagename='M100_12m_CO.image.pbcor.mom0', fitsimage='M100_12m_CO.image.mom0.pbcor.fits')
exportfits(imagename='M100_12m_CO.image.mom1', fitsimage='M100_12m_CO.image.mom1.fits')
| 35.854167 | 125 | 0.576554 |
ace99a8177e6d827ab7eb0a1ce580dd71956f2c7 | 618 | py | Python | autoscalingsim/scaling/policiesbuilder/scaled/scaling_aggregation_rules/parallel_rules_impl/min_scale.py | Remit/autoscaling-simulator | 091943c0e9eedf9543e9305682a067ab60f56def | [
"MIT"
] | 6 | 2021-03-10T16:23:10.000Z | 2022-01-14T04:57:46.000Z | autoscalingsim/scaling/policiesbuilder/scaled/scaling_aggregation_rules/parallel_rules_impl/min_scale.py | Remit/autoscaling-simulator | 091943c0e9eedf9543e9305682a067ab60f56def | [
"MIT"
] | null | null | null | autoscalingsim/scaling/policiesbuilder/scaled/scaling_aggregation_rules/parallel_rules_impl/min_scale.py | Remit/autoscaling-simulator | 091943c0e9eedf9543e9305682a067ab60f56def | [
"MIT"
] | 1 | 2022-01-14T04:57:55.000Z | 2022-01-14T04:57:55.000Z | from autoscalingsim.scaling.policiesbuilder.scaled.scaling_aggregation import ScalingEffectAggregationRule
from autoscalingsim.scaling.policiesbuilder.scaled.scaling_aggregation_rules.parallel import ParallelScalingEffectAggregationRule
@ScalingEffectAggregationRule.register('minScale')
class MinScalingEffectAggregationRule(ParallelScalingEffectAggregationRule):
    # Parallel aggregation rule registered under the key 'minScale'.
    # All behavior lives in the parent class; this subclass only fixes the
    # aggregation operator name to 'min' (presumably a minimum over the
    # parallel scaling effects -- confirm against the parent class).
    def __init__(self, service_name : str, regions : list, scaling_setting_for_service : 'ScaledServiceScalingSettings', state_reader : 'StateReader'):
        super().__init__(service_name, regions, scaling_setting_for_service, state_reader, 'min')
ace99b5c1315e8dfbdb9bd941456d54cf6349bcc | 474 | py | Python | apps/iiif/kollections/migrations/0008_auto_20211012_1612.py | ecds/readux | 4eac8b48efef8126f4f2be28b5eb943c85a89c2e | [
"Apache-2.0"
] | 18 | 2017-06-12T09:58:02.000Z | 2021-10-01T11:14:34.000Z | apps/iiif/kollections/migrations/0008_auto_20211012_1612.py | ecds/readux | 4eac8b48efef8126f4f2be28b5eb943c85a89c2e | [
"Apache-2.0"
] | 276 | 2019-04-26T20:13:01.000Z | 2022-03-31T10:26:28.000Z | apps/iiif/kollections/migrations/0008_auto_20211012_1612.py | ecds/readux | 4eac8b48efef8126f4f2be28b5eb943c85a89c2e | [
"Apache-2.0"
] | 7 | 2018-03-13T23:44:26.000Z | 2021-09-15T17:54:55.000Z | # Generated by Django 2.2.24 on 2021-10-12 16:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters Collection.pid, capturing the
    # random default value ('2qmwbpfz') that makemigrations evaluated at
    # generation time.  Do not edit by hand.
    dependencies = [
        ('kollections', '0007_auto_20211007_2031'),
    ]
    operations = [
        migrations.AlterField(
            model_name='collection',
            name='pid',
            field=models.CharField(default='2qmwbpfz', help_text="Unique ID. Do not use _'s or spaces in the pid.", max_length=255),
        ),
    ]
| 24.947368 | 132 | 0.624473 |
ace99b7d24479f61c23c3820023f748cf2cd2468 | 195 | py | Python | workshop_petstagram/workshop_petstagram/main/templatetags/profiles.py | trenev/softuni-python-web-basics | 0fcf6b7f3389d06685d40615c376dc4027e772f2 | [
"MIT"
] | 1 | 2022-03-03T10:16:14.000Z | 2022-03-03T10:16:14.000Z | workshop_petstagram/workshop_petstagram/main/templatetags/profiles.py | trenev/softuni-python-web-basics | 0fcf6b7f3389d06685d40615c376dc4027e772f2 | [
"MIT"
] | null | null | null | workshop_petstagram/workshop_petstagram/main/templatetags/profiles.py | trenev/softuni-python-web-basics | 0fcf6b7f3389d06685d40615c376dc4027e772f2 | [
"MIT"
] | null | null | null | from django import template
from workshop_petstagram.main.models import Profile
register = template.Library()
@register.simple_tag()
def has_profile():
    """Return True when at least one Profile record exists."""
    profile_count = Profile.objects.count()
    return profile_count > 0
| 17.727273 | 51 | 0.774359 |
ace99b86c2188835aa1f24602dcc1efc5da887f5 | 1,718 | py | Python | scripts/util_meshmover.py | wpumacay/tysocTerrain | 78b6d9804ade89a483fb60952ed6e1bf50fbf3da | [
"MIT"
] | 1 | 2020-06-17T00:57:11.000Z | 2020-06-17T00:57:11.000Z | scripts/util_meshmover.py | wpumacay/tysocCore | 78b6d9804ade89a483fb60952ed6e1bf50fbf3da | [
"MIT"
] | 7 | 2019-05-30T03:41:42.000Z | 2020-08-21T06:29:52.000Z | scripts/util_meshmover.py | wpumacay/tysoc | 78b6d9804ade89a483fb60952ed6e1bf50fbf3da | [
"MIT"
] | null | null | null |
import os
import subprocess as sp
RESOURCES_FOLDER = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '../res/xml' ) )
WORKING_MESHES_FOLDER = os.path.join( RESOURCES_FOLDER, 'laikago_meshes' )
def moveResourceToRoot( rootFolder, filename, folder ) :
    """Move a mesh file from ``folder`` into ``rootFolder``.

    The extension is normalized to lowercase.  Files that are not meshes
    (anything other than .stl/.obj, case-insensitive) are left untouched,
    and nothing happens when source and destination paths coincide.
    """
    # splitext avoids the substring pitfall of the old str.find approach:
    # a name like 'part.stl.bak' is a '.bak' file and must be skipped, and
    # mixed-case extensions such as '.StL' are now recognized too.
    _name, _ext = os.path.splitext( filename )
    if _ext.lower() not in ( '.stl', '.obj' ) :
        return
    _normFilename = _name + _ext.lower()
    _srcFilepath = os.path.join( folder, filename )
    _dstFilepath = os.path.join( rootFolder, _normFilename )
    if _srcFilepath != _dstFilepath :
        print( 'moving: %s to %s' % ( _srcFilepath, _dstFilepath ) )
        sp.call( ['mv', _srcFilepath, _dstFilepath] )
def grabFromSubfolder( rootFolder, currentFolder ) :
    """Recursively move every resource below ``currentFolder`` into ``rootFolder``.

    Plain files are handled before subfolders (directories sort last), so a
    folder's own resources are relocated before recursing.
    """
    _elements = os.listdir( currentFolder )
    # BUG FIX: the isdir key must test the full path; the bare element name
    # was being resolved against the process CWD, so the files-before-dirs
    # ordering was effectively random.
    _elements = sorted( _elements,
                        key=lambda e : os.path.isdir( os.path.join( currentFolder, e ) ) )
    for _elem in _elements :
        if os.path.isdir( os.path.join( currentFolder, _elem ) ) :
            _subfolder = os.path.join( currentFolder, _elem )
            grabFromSubfolder( rootFolder, _subfolder )
            ## sp.call( ['rm', '-r', _subfolder] )
        else :
            moveResourceToRoot( rootFolder, _elem, currentFolder )
if __name__ == '__main__' :
    # Flatten the laikago mesh tree: pull every mesh file found under the
    # working meshes folder up into that folder itself.
    print( 'resfolder: ', RESOURCES_FOLDER )
    print( 'meshesfolder: ', WORKING_MESHES_FOLDER )
    grabFromSubfolder( WORKING_MESHES_FOLDER, WORKING_MESHES_FOLDER )
| 33.686275 | 95 | 0.652503 |
ace99cf206dc4075127b1c8e0f3ab8ca9e6e8106 | 365 | py | Python | setup.py | the-okn3/volafile-downloader | 91bb98a2e3384cc522c549aac2734d1f21fecaea | [
"MIT"
] | 29 | 2017-08-26T12:35:09.000Z | 2020-09-22T23:20:46.000Z | setup.py | the-okn3/volafile-downloader | 91bb98a2e3384cc522c549aac2734d1f21fecaea | [
"MIT"
] | 2 | 2018-09-10T08:16:31.000Z | 2019-03-14T19:58:59.000Z | setup.py | the-okn3/volafile-downloader | 91bb98a2e3384cc522c549aac2734d1f21fecaea | [
"MIT"
] | 6 | 2018-08-30T14:59:08.000Z | 2020-12-06T08:12:25.000Z | from setuptools import setup
# Packaging metadata; installs the ``volafile-downloader`` script.
setup(
    name="volafile-downloader",
    version="2.2.0",
    scripts=["volafile-downloader"],
    description="Volafile.org files downloader",
    author="Okn3",
    author_email="okn3@protonmail.com",
    url="https://github.com/the-okn3/volafile-downloader",
    keywords=["volafile", "downloader", "download", "files", "chat"]
)
| 28.076923 | 68 | 0.679452 |
ace99cfa9110f733b74a638bd28490854c6c6602 | 7,116 | py | Python | kubernetes/client/models/com_coreos_monitoring_v1_alertmanager_spec_affinity_node_affinity_preference_match_expressions.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/com_coreos_monitoring_v1_alertmanager_spec_affinity_node_affinity_preference_match_expressions.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/com_coreos_monitoring_v1_alertmanager_spec_affinity_node_affinity_preference_match_expressions.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type.
    openapi_types = {
        'key': 'str',
        'operator': 'str',
        'values': 'list[str]'
    }
    # Python attribute name -> JSON field name in the API payload.
    attribute_map = {
        'key': 'key',
        'operator': 'operator',
        'values': 'values'
    }
    def __init__(self, key=None, operator=None, values=None, local_vars_configuration=None): # noqa: E501
        """ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions - a model defined in OpenAPI""" # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._key = None
        self._operator = None
        self._values = None
        self.discriminator = None
        # ``key`` and ``operator`` are required: their setters raise when
        # client-side validation is enabled and the value is None.
        self.key = key
        self.operator = operator
        if values is not None:
            self.values = values
    @property
    def key(self):
        """Gets the key of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions. # noqa: E501
        The label key that the selector applies to. # noqa: E501
        :return: The key of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions. # noqa: E501
        :rtype: str
        """
        return self._key
    @key.setter
    def key(self, key):
        """Sets the key of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions.
        The label key that the selector applies to. # noqa: E501
        :param key: The key of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions. # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
            raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
        self._key = key
    @property
    def operator(self):
        """Gets the operator of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions. # noqa: E501
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. # noqa: E501
        :return: The operator of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions. # noqa: E501
        :rtype: str
        """
        return self._operator
    @operator.setter
    def operator(self, operator):
        """Sets the operator of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions.
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. # noqa: E501
        :param operator: The operator of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions. # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and operator is None: # noqa: E501
            raise ValueError("Invalid value for `operator`, must not be `None`") # noqa: E501
        self._operator = operator
    @property
    def values(self):
        """Gets the values of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions. # noqa: E501
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. # noqa: E501
        :return: The values of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions. # noqa: E501
        :rtype: list[str]
        """
        return self._values
    @values.setter
    def values(self, values):
        """Sets the values of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions.
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. # noqa: E501
        :param values: The values of this ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions. # noqa: E501
        :type: list[str]
        """
        self._values = values
    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively converts nested models, lists and dicts of models.
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions):
            return False
        # Equality is structural: compare the serialized dict forms.
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ComCoreosMonitoringV1AlertmanagerSpecAffinityNodeAffinityPreferenceMatchExpressions):
            return True
        return self.to_dict() != other.to_dict()
| 39.314917 | 361 | 0.668494 |
ace99cff74f146a885d1a9398e2fad02d8cd2bcc | 10,161 | py | Python | unittests/libtests/feassemble/data/ElasticityApp.py | joegeisz/pylith | f74060b7b19d7e90abf8597bbe9250c96593c0ad | [
"MIT"
] | 1 | 2021-09-09T06:24:11.000Z | 2021-09-09T06:24:11.000Z | unittests/libtests/feassemble/data/ElasticityApp.py | joegeisz/pylith | f74060b7b19d7e90abf8597bbe9250c96593c0ad | [
"MIT"
] | null | null | null | unittests/libtests/feassemble/data/ElasticityApp.py | joegeisz/pylith | f74060b7b19d7e90abf8597bbe9250c96593c0ad | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file unittests/libtests/feassemble/data/ElasticityApp.py
## @brief Python application for generating C++ data files for testing
## C++ elasticity integrator objects.
from IntegratorApp import IntegratorApp
import numpy
import feutils
# ----------------------------------------------------------------------
# ElasticityApp class
class ElasticityApp(IntegratorApp):
  """
  Python application for generating C++ data files for testing C++
  elasticity integrator objects.
  """

  # INVENTORY //////////////////////////////////////////////////////////

  class Inventory(IntegratorApp.Inventory):
    """Python object for managing IntegratorApp facilities and properties."""

    ## @class Inventory
    ## Python object for managing ElasticityIntegrator facilities and
    ## properties.
    ##
    ## \b Properties
    ## @li \b useGravity Include gravitational body forces in residual.
    ##
    ## \b Facilities
    ## @li \b formulation Elasticity formulation.

    import pyre.inventory

    # Pyre inventory: configurable via the application's settings files.
    useGravity = pyre.inventory.bool("use_gravity", default=False)
    useGravity.meta['tip'] = "Include gravitational body forces in residual."

    from ElasticityImplicit import ElasticityImplicit
    formulation = pyre.inventory.facility("formulation",
                                          factory=ElasticityImplicit)
    formulation.meta['tip'] = "Elasticity formulation."
  # PUBLIC METHODS /////////////////////////////////////////////////////

  def __init__(self, name="elasticityapp"):
    """
    Constructor.
    """
    IntegratorApp.__init__(self, name)
    # Material parameters; expected to be set by the concrete test-data
    # generator before the matrices are computed.
    self.density = None
    self.lameMu = None
    self.lameLambda = None
    return

  # PRIVATE METHODS ////////////////////////////////////////////////////

  def _configure(self):
    """
    Set members using inventory.
    """
    IntegratorApp._configure(self)
    self.useGravity = self.inventory.useGravity
    self.formulation = self.inventory.formulation
    return

  def _calculateResidual(self):
    """
    Calculate contribution to residual of operator for integrator.
    """
    # Delegated to the configured formulation (implicit/explicit).
    self.valsResidual = self.formulation.calculateResidual(self)
    if self.useGravity:
      self.valsResidual += self._calculateGravityVec()
    return

  def _calculateJacobian(self):
    """
    Calculate contribution to Jacobian matrix of operator for integrator.
    """
    self.valsJacobian = self.formulation.calculateJacobian(self)
    return
  def _calculateStiffnessMat(self):
    """
    Calculate stiffness matrix.

    Assembles K = sum over cells of integral B^T D B, evaluated by
    numerical quadrature. Python 2 code (uses xrange).
    """
    import feutils
    K = numpy.zeros( (self.spaceDim*self.numVertices,
                      self.spaceDim*self.numVertices),
                     dtype=numpy.float64)

    # Matrix of elasticity values
    D = self._calculateElasticityMat()

    for cell in self.cells:
      cellK = numpy.zeros( (self.spaceDim*self.numBasis,
                            self.spaceDim*self.numBasis),
                           dtype=numpy.float64)
      vertices = self.vertices[cell, :]
      (jacobian, jacobianInv, jacobianDet, basisDeriv) = \
          feutils.calculateJacobian(self.quadrature, vertices)
      for iQuad in xrange(self.numQuadPts):
        # Quadrature weight times Jacobian determinant at this point.
        wt = self.quadWts[iQuad] * jacobianDet[iQuad]
        B = self._calculateBasisDerivMat(basisDeriv, iQuad)
        cellK[:] += wt * numpy.dot(numpy.dot(B.transpose(), D), B)
      feutils.assembleMat(K, cellK, cell, self.spaceDim)
    return K

  def _calculateMassMat(self):
    """
    Calculate mass matrix.

    Assembles M = sum over cells of integral density * N^T N.
    """
    M = numpy.zeros( (self.spaceDim*self.numVertices,
                      self.spaceDim*self.numVertices),
                     dtype=numpy.float64)
    for cell in self.cells:
      cellM = numpy.zeros( (self.spaceDim*self.numBasis,
                            self.spaceDim*self.numBasis),
                           dtype=numpy.float64)
      vertices = self.vertices[cell, :]
      (jacobian, jacobianInv, jacobianDet, basisDeriv) = \
          feutils.calculateJacobian(self.quadrature, vertices)
      for iQuad in xrange(self.numQuadPts):
        wt = self.quadWts[iQuad] * jacobianDet[iQuad]
        N = self._calculateBasisMat(iQuad)
        cellM[:] += self.density * wt * numpy.dot(N.transpose(), N)
      feutils.assembleMat(M, cellM, cell, self.spaceDim)
    return M

  def _calculateGravityVec(self):
    """
    Calculate body force vector.

    Integrates density * g over each cell, accumulating into a global
    vector ordered [vertex0_x, vertex0_y, ..., vertexN_z].
    """
    gravityGlobal = numpy.zeros((self.numVertices*self.spaceDim),
                                dtype=numpy.float64)
    for cell in self.cells:
      gravityCell = numpy.zeros((self.spaceDim*self.numBasis))
      vertices = self.vertices[cell, :]
      (jacobian, jacobianInv, jacobianDet, basisDeriv) = \
          feutils.calculateJacobian(self.quadrature, vertices)
      for iQuad in xrange(self.numQuadPts):
        wt = self.quadWts[iQuad] * jacobianDet[iQuad] * self.density
        for iBasis in xrange(self.numBasis):
          valI = wt * self.basis[iQuad, iBasis]
          for iDim in xrange(self.spaceDim):
            gravityCell[iDim + iBasis * self.spaceDim] += \
                valI * self.gravityVec[iDim]
      feutils.assembleVec(gravityGlobal, gravityCell, cell, self.spaceDim)
    return gravityGlobal
def _calculateElasticityMat(self):
"""
Calculate elasticity matrix.
"""
if 1 == self.cellDim:
lambda2mu = self.lameLambda + 2*self.lameMu
C1111 = lambda2mu
D = numpy.array([ [C1111] ],
dtype=numpy.float64)
elif 2 == self.cellDim:
lambda2mu = self.lameLambda + 2*self.lameMu
C1111 = lambda2mu
C1122 = self.lameLambda
C1112 = 0.0
C2211 = self.lameLambda
C2222 = lambda2mu
C2212 = 0.0
C1211 = 0.0
C1222 = 0.0
C1212 = 2.0*self.lameMu
D = numpy.array([ [C1111, C1122, 0.5*C1112],
[C2211, C2222, 0.5*C2212],
[0.5*C1211, 0.5*C1222, 0.5*C1212] ],
dtype=numpy.float64)
elif 3 == self.cellDim:
lambda2mu = self.lameLambda + 2.0*self.lameMu
C1111 = lambda2mu
C1122 = self.lameLambda
C1133 = self.lameLambda
C1112 = 0.0
C1123 = 0.0
C1113 = 0.0
C2211 = self.lameLambda
C2222 = lambda2mu
C2233 = self.lameLambda
C2212 = 0.0
C2223 = 0.0
C2213 = 0.0
C3311 = self.lameLambda
C3322 = self.lameLambda
C3333 = lambda2mu
C3312 = 0.0
C3323 = 0.0
C3313 = 0.0
C1211 = 0.0
C1222 = 0.0
C1233 = 0.0
C1212 = 2.0*self.lameMu
C1223 = 0.0
C1213 = 0.0
C2311 = 0.0
C2322 = 0.0
C2333 = 0.0
C2312 = 0.0
C2323 = 2.0*self.lameMu
C2313 = 0.0
C1311 = 0.0
C1322 = 0.0
C1333 = 0.0
C1312 = 0.0
C1323 = 0.0
C1313 = 2.0*self.lameMu
D = numpy.array([ [C1111, C1122, C1133, 0.5*C1112, 0.5*C1123, 0.5*C1113],
[C2211, C2222, C2233, 0.5*C2212, 0.5*C2223, 0.5*C2213],
[C3311, C3322, C3333, 0.5*C3312, 0.5*C3323, 0.5*C3313],
[0.5*C1211, 0.5*C1222, 0.5*C1233, 0.5*C1212, 0.5*C1223, 0.5*C1213],
[0.5*C2311, 0.5*C2322, 0.5*C2333, 0.5*C2312, 0.5*C2323, 0.5*C2313],
[0.5*C1311, 0.5*C1322, 0.5*C1333, 0.5*C1312, 0.5*C1323, 0.5*C1313] ],
dtype=numpy.float64)
return D
def _calculateBasisMat(self, iQuad):
"""
Calculate matrix of basis functions.
"""
N = numpy.zeros( (self.spaceDim, self.spaceDim*self.numBasis),
dtype=numpy.float64)
for iBasis in xrange(self.numBasis):
for iDim in xrange(self.spaceDim):
N[iDim, iBasis*self.spaceDim+iDim] = self.basis[iQuad, iBasis]
return N
  def _calculateBasisDerivMat(self, basisDeriv, iQuad):
    """
    Calculate matrix of derivatives of basis functions.

    Rows pair derivative components per the strain ordering used by
    _calculateElasticityMat: 3-D (xx, yy, zz, xy, yz, xz),
    2-D (xx, yy, xy), 1-D (xx).
    """
    if 3 == self.spaceDim:
      B = numpy.zeros( (6, self.spaceDim*self.numBasis),
                       dtype=numpy.float64)
      for iBasis in xrange(self.numBasis):
        B[0, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 0]
        B[1, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 1]
        B[2, iBasis*self.spaceDim+2] = basisDeriv[iQuad, iBasis, 2]
        # Shear rows combine two derivative components each.
        B[3, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 1]
        B[3, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 0]
        B[4, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 2]
        B[4, iBasis*self.spaceDim+2] = basisDeriv[iQuad, iBasis, 1]
        B[5, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 2]
        B[5, iBasis*self.spaceDim+2] = basisDeriv[iQuad, iBasis, 0]
    elif 2 == self.spaceDim:
      B = numpy.zeros( (3, self.spaceDim*self.numBasis),
                       dtype=numpy.float64)
      for iBasis in xrange(self.numBasis):
        B[0, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 0]
        B[1, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 1]
        B[2, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 1]
        B[2, iBasis*self.spaceDim+1] = basisDeriv[iQuad, iBasis, 0]
    elif 1 == self.spaceDim:
      B = numpy.zeros( (1, self.spaceDim*self.numBasis),
                       dtype=numpy.float64)
      for iBasis in xrange(self.numBasis):
        B[0, iBasis*self.spaceDim+0] = basisDeriv[iQuad, iBasis, 0]
    else:
      raise ValueError("Unknown spatial dimension '%d'." % self.spaceDim)
    return B
# MAIN /////////////////////////////////////////////////////////////////
if __name__ == "__main__":
  # Run the data generator as a pyre application.
  app = ElasticityApp()
  app.run()

# End of file
| 32.672026 | 93 | 0.581931 |
ace99d549a679c8d6e4396dba4af4c5e3ac8b26a | 823 | py | Python | base/migrations/0006_auto_20200511_1310.py | Sanquira/immortalfighters | 388018bfb5df4e4fdadb866a599b46e0387add6e | [
"MIT"
] | null | null | null | base/migrations/0006_auto_20200511_1310.py | Sanquira/immortalfighters | 388018bfb5df4e4fdadb866a599b46e0387add6e | [
"MIT"
] | 5 | 2020-02-20T10:20:33.000Z | 2021-09-22T18:43:04.000Z | base/migrations/0006_auto_20200511_1310.py | Sanquira/immortalfighters | 388018bfb5df4e4fdadb866a599b46e0387add6e | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-05-11 11:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames Character.character_name to
    # 'name' and adds required health / max_health fields (Czech verbose
    # names). preserve_default=False: the 0 defaults exist only to back-fill
    # existing rows.

    dependencies = [
        ('base', '0005_ifuser_chat_sounds'),
    ]

    operations = [
        migrations.RenameField(
            model_name='character',
            old_name='character_name',
            new_name='name',
        ),
        migrations.AddField(
            model_name='character',
            name='health',
            field=models.IntegerField(default=0, verbose_name='Životy'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='character',
            name='max_health',
            field=models.PositiveIntegerField(default=0, verbose_name='Maximální životy'),
            preserve_default=False,
        ),
    ]
| 26.548387 | 90 | 0.582017 |
ace99dc18353cb254a42dd60a180dd2d131959bf | 368 | py | Python | social/migrations/0008_alter_profile_profile_img_url.py | TownOneWheel/townonewheel | 9feb120b7541b31d99b63c95edc7949005ab7862 | [
"MIT"
] | null | null | null | social/migrations/0008_alter_profile_profile_img_url.py | TownOneWheel/townonewheel | 9feb120b7541b31d99b63c95edc7949005ab7862 | [
"MIT"
] | 18 | 2021-07-01T08:35:13.000Z | 2021-07-25T08:18:09.000Z | social/migrations/0008_alter_profile_profile_img_url.py | TownOneWheel/townonewheel | 9feb120b7541b31d99b63c95edc7949005ab7862 | [
"MIT"
] | 1 | 2021-07-13T05:12:14.000Z | 2021-07-13T05:12:14.000Z | from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration altering Profile.profile_img_url.
    # NOTE(review): default=True on a TextField looks unintended (a boolean
    # default for a text column) -- confirm against the current model.

    dependencies = [
        ('social', '0007_auto_20210713_0117'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='profile_img_url',
            field=models.TextField(blank=True, default=True, null=True),
        ),
    ]
| 21.647059 | 72 | 0.603261 |
ace99ec36d70350a818b6431276bb304b35082a7 | 1,015 | py | Python | predict.py | Mutefish0/graduation-project | b6c47e946a6ed2fe25389881828e15b5e522eeb5 | [
"Apache-2.0"
] | null | null | null | predict.py | Mutefish0/graduation-project | b6c47e946a6ed2fe25389881828e15b5e522eeb5 | [
"Apache-2.0"
] | null | null | null | predict.py | Mutefish0/graduation-project | b6c47e946a6ed2fe25389881828e15b5e522eeb5 | [
"Apache-2.0"
] | null | null | null | #coding=utf8
import tensorflow as tf

# Python 2 / TF1.x script: restores a trained softmax-regression MNIST
# model and reports its accuracy on the test set and a correction set.

# Placeholders for flattened 28x28 input images and one-hot labels.
x = tf.placeholder("float", [None, 784])
y_ = tf.placeholder("float", [None,10])
# Weight parameters
W = tf.Variable(tf.zeros([784, 10]))
# Bias parameters
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Restore the trained model
sess = tf.Session()
saver = tf.train.Saver([W, b])
saver.restore(sess, './model_data/model.ckpt')
# Prediction
def predict(images):
    # Return softmax class probabilities for a batch of flattened images.
    return sess.run(y, {x: images})
###### Accuracy evaluation
# Per-sample correctness list [ True, False, False, True ...]
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# Accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Test-set accuracy
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print "测试集正确率:", sess.run(accuracy, {x: mnist.test.images, y_: mnist.test.labels})
# Correction-set accuracy (images loaded from the local correct/ directory)
import sys
sys.path.append('tools/')
from tool import load_data_from_imgs
images, labels = load_data_from_imgs('correct/')
print "矫正集正确率: ", sess.run(accuracy, {x: images, y_: labels})
| 23.604651 | 82 | 0.696552 |
ace99f01af45e3573b28673e665b486e52d1e014 | 3,569 | py | Python | env/lib/python3.5/site-packages/tenacity/retry.py | creekhead/RPI_google_asst | 65dc7b08bb8333f8977488f37f7d3ec652489a44 | [
"Apache-2.0"
] | 224 | 2020-01-02T10:46:37.000Z | 2022-03-02T13:54:08.000Z | env/lib/python3.5/site-packages/tenacity/retry.py | creekhead/RPI_google_asst | 65dc7b08bb8333f8977488f37f7d3ec652489a44 | [
"Apache-2.0"
] | 16 | 2020-03-11T09:37:58.000Z | 2022-01-26T10:22:08.000Z | env/lib/python3.5/site-packages/tenacity/retry.py | creekhead/RPI_google_asst | 65dc7b08bb8333f8977488f37f7d3ec652489a44 | [
"Apache-2.0"
] | 24 | 2020-03-24T13:53:50.000Z | 2022-03-22T11:55:18.000Z | # Copyright 2016 Julien Danjou
# Copyright 2016 Joshua Harlow
# Copyright 2013-2014 Ray Holder
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class retry_base(object):
    """Abstract base class for retry strategies."""

    @abc.abstractmethod
    def __call__(self, attempt):
        pass

    def __and__(self, other):
        # strategy & strategy -> retry only when BOTH accept the attempt.
        return retry_all(self, other)

    def __or__(self, other):
        # strategy | strategy -> retry when EITHER accepts the attempt.
        return retry_any(self, other)


class _retry_never(retry_base):
    """Retry strategy that never rejects any result."""

    def __call__(self, attempt):
        return False


# Shared singleton; safe because the strategy holds no state.
retry_never = _retry_never()


class _retry_always(retry_base):
    """Retry strategy that always rejects any result."""

    def __call__(self, attempt):
        return True


# Shared singleton; safe because the strategy holds no state.
retry_always = _retry_always()
class retry_if_exception(retry_base):
    """Retry strategy that retries if an exception verifies a predicate."""

    def __init__(self, predicate):
        self.predicate = predicate

    def __call__(self, attempt):
        # Implicitly returns None (falsy, i.e. "don't retry") when the
        # attempt did not raise.
        if attempt.failed:
            return self.predicate(attempt.exception())


class retry_if_exception_type(retry_if_exception):
    """Retries if an exception has been raised of one or more types."""

    def __init__(self, exception_types=Exception):
        # Stored for introspection; the predicate closes over the argument.
        self.exception_types = exception_types
        super(retry_if_exception_type, self).__init__(
            lambda e: isinstance(e, exception_types))


class retry_unless_exception_type(retry_if_exception):
    """Retries until an exception is raised of one or more types."""

    def __init__(self, exception_types=Exception):
        self.exception_types = exception_types
        super(retry_unless_exception_type, self).__init__(
            lambda e: not isinstance(e, exception_types))

    def __call__(self, attempt):
        # always retry if no exception was raised
        if not attempt.failed:
            return True
        return self.predicate(attempt.exception())
class retry_if_result(retry_base):
    """Retries if the result verifies a predicate."""

    def __init__(self, predicate):
        self.predicate = predicate

    def __call__(self, attempt):
        # Implicitly returns None (falsy) when the attempt raised.
        if not attempt.failed:
            return self.predicate(attempt.result())


class retry_if_not_result(retry_base):
    """Retries if the result refutes a predicate."""

    def __init__(self, predicate):
        self.predicate = predicate

    def __call__(self, attempt):
        if not attempt.failed:
            return not self.predicate(attempt.result())
class retry_any(retry_base):
    """Composite strategy: retry when at least one wrapped strategy votes to retry."""

    def __init__(self, *retries):
        self.retries = retries

    def __call__(self, attempt):
        return any(strategy(attempt) for strategy in self.retries)
class retry_all(retry_base):
    """Composite strategy: retry only when every wrapped strategy votes to retry."""

    def __init__(self, *retries):
        self.retries = retries

    def __call__(self, attempt):
        return all(strategy(attempt) for strategy in self.retries)
| 27.037879 | 75 | 0.695153 |
ace99f931464ec358658bd54d970400ec04f8d8b | 5,108 | py | Python | dataget/dataset/dataset.py | charlielito/dataget | b1c06e3f8dfc947c2c2e7735c44963d5b796ae12 | [
"MIT"
] | null | null | null | dataget/dataset/dataset.py | charlielito/dataget | b1c06e3f8dfc947c2c2e7735c44963d5b796ae12 | [
"MIT"
] | null | null | null | dataget/dataset/dataset.py | charlielito/dataget | b1c06e3f8dfc947c2c2e7735c44963d5b796ae12 | [
"MIT"
] | 2 | 2018-09-10T11:11:29.000Z | 2018-11-26T15:31:23.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __coconut_hash__ = 0x84c0064d
# Compiled with Coconut version 1.2.3-post_dev1 [Colonel]
# Coconut Header: --------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os.path as _coconut_os_path
_coconut_file_path = _coconut_os_path.dirname(_coconut_os_path.abspath(__file__))
_coconut_sys.path.insert(0, _coconut_file_path)
from __coconut__ import _coconut, _coconut_MatchError, _coconut_tail_call, _coconut_tco, _coconut_igetitem, _coconut_compose, _coconut_pipe, _coconut_starpipe, _coconut_backpipe, _coconut_backstarpipe, _coconut_bool_and, _coconut_bool_or, _coconut_minus, _coconut_map, _coconut_partial
from __coconut__ import *
_coconut_sys.path.remove(_coconut_file_path)
# Compiled Coconut: ------------------------------------------------------
import os
import shutil
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
class DataSet(object):
    """Abstract base for a downloadable dataset rooted at <home_path>/<name>."""
    # Python 2 style ABC declaration (file is compiled from Coconut);
    # __metaclass__ is inert under Python 3.
    __metaclass__ = ABCMeta

    def __init__(self, name, home_path):
        self.name = name
        self.path = os.path.join(home_path, self.name)
        # Subset helpers manage the training-set/ and test-set/ directories;
        # the concrete classes come from the subclass hooks below.
        self.training_set = self.training_set_class(self, "training-set")
        self.test_set = self.test_set_class(self, "test-set")

    def make_dirs(self):
        """Create the dataset directory and both subset directories."""
        if not os.path.exists(self.path):
            os.makedirs(self.path)

        self.training_set.make_dirs()
        self.test_set.make_dirs()
    def get(self, rm=False, rm_compressed=True, process=True, rm_raw=True, **kwargs):
        """Fetch the dataset end-to-end: download, extract, clean, process.

        Skips everything when the dataset directory already has content
        (unless rm=True forces a fresh start). Returns self for chaining.
        """
        # rm
        if rm:
            self.rm()

        # return if path exists, dataset downloaded already, else create path
        if not self.is_empty():
            return self

        # get data
        self.download(**kwargs).extract(**kwargs)

        # clean
        if rm_compressed:
            self.rm_compressed(**kwargs)

        # process
        if process:
            self.process(**kwargs)

            if rm_raw:
                self.rm_raw()

        return self

    def download(self, rm=False, **kwargs):
        """Download raw files via the subclass hook _download(); chainable."""
        print("===DOWNLOAD===")

        # rm
        if rm:
            self.rm()

        if not self.is_empty():
            return self

        self.make_dirs()
        self._download(**kwargs)

        print("")
        return self
    def extract(self, **kwargs):
        """Unpack downloaded archives via the subclass hook _extract(); chainable."""
        print("===EXTRACT===")
        self.make_dirs()
        self._extract(**kwargs)
        print("")
        return self

    def rm_compressed(self, **kwargs):
        """Remove leftover archive files via _rm_compressed(); chainable."""
        print("===RM-COMPRESSED===")
        self._rm_compressed(**kwargs)
        print("")
        return self

    def process(self, **kwargs):
        """Convert raw data to its final form via _process(); chainable."""
        print("===PROCESS===")
        self._process(**kwargs)
        print("")
        return self

    def rm_raw(self, **kwargs):
        """Remove raw intermediate files via _rm_raw(); chainable."""
        print("===RM-RAW===")
        self._rm_raw(**kwargs)
        print("")
        return self
    def rm(self):
        """Delete the whole dataset directory (if present); chainable."""
        if os.path.exists(self.path):
            # Coconut-compiled pipeline: prints the last path component
            # (the dataset name) before removing the tree.
            (print)((_coconut.operator.itemgetter(-1))(self.path.split("/")))
            shutil.rmtree(self.path)
        return self

    def rm_subsets(self):
        """Delete only the training-set/ and test-set/ subdirectories; chainable."""
        if os.path.exists(self.training_set.path):
            shutil.rmtree(self.training_set.path)

        if os.path.exists(self.test_set.path):
            shutil.rmtree(self.test_set.path)

        return self
def is_empty(self):
if not os.path.exists(self.path):
return True
else:
return not os.listdir(self.path)
    @abstractproperty
    def training_set_class(self):
        # SubSet subclass used for the training-set/ directory.
        pass

    @abstractproperty
    def test_set_class(self):
        # SubSet subclass used for the test-set/ directory.
        pass

    @abstractproperty
    def help(self):
        # Human-readable usage/description text for this dataset.
        pass

    @abstractmethod
    def _download(self):
        # Fetch the raw (possibly compressed) files into self.path.
        pass

    @abstractmethod
    def _extract(self):
        # Unpack compressed files inside self.path.
        pass
def _rm_compressed(self):
print("removing compressed files")
for file in os.listdir(self.path):
file = os.path.join(self.path, file)
if not os.path.isdir(file):
os.remove(file)
def remove_all_file_with_extension(self, extension):
for root, dirs, files in os.walk(self.path):
for file in files:
file = os.path.join(root, file)
if file.endswith(".{}".format(extension)):
os.remove(file)
    @abstractmethod
    def _process(self):
        # Convert extracted raw data into the final dataset layout.
        pass

    @abstractmethod
    def _rm_raw(self):
        # Delete raw intermediate files once processing succeeded.
        pass

    @abstractmethod
    def reqs(self, **kwargs):
        # Presumably reports this dataset's extra requirements -- confirm
        # against concrete subclasses.
        pass
class SubSet(object):
    """Abstract view over one subset directory (training-set/ or test-set/)."""
    # Python 2 style ABC declaration; inert under Python 3.
    __metaclass__ = ABCMeta

    def __init__(self, dataset, name):
        self.dataset = dataset
        self.path = os.path.join(dataset.path, name)

    def make_dirs(self):
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    @abstractmethod
    def dataframe(self):
        # Whole subset as a single table (presumably a pandas DataFrame --
        # confirm in concrete subclasses).
        pass

    @abstractmethod
    def arrays(self):
        # Whole subset as in-memory arrays; exact shape is subclass-defined.
        pass

    @abstractmethod
    def random_batch_dataframe_generator(self, batch_size):
        pass

    @abstractmethod
    def random_batch_arrays_generator(self, batch_size):
        pass
| 22.305677 | 285 | 0.600822 |
ace9a1b6870f8c0d5bf153d3aab10683ec6e9b69 | 1,285 | py | Python | src/Solu15.py | wsdmakeup/codePractice | b722bd85dc064662ba2ddf1ab4ccb01039e478eb | [
"MIT"
] | null | null | null | src/Solu15.py | wsdmakeup/codePractice | b722bd85dc064662ba2ddf1ab4ccb01039e478eb | [
"MIT"
] | null | null | null | src/Solu15.py | wsdmakeup/codePractice | b722bd85dc064662ba2ddf1ab4ccb01039e478eb | [
"MIT"
] | null | null | null | #coding = utf-8
'''
Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
Note: The solution set must not contain duplicate triplets.
For example, given array S = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
'''
class Solution(object):
    def threeSum(self, nums):
        """Return all unique triplets [a, b, c] from nums with a + b + c == 0.

        Sorted two-pointer scan, O(n^2). The original rescanned a list
        slice for every pair and deduplicated with a linear `in result`
        check, which is far slower on large inputs. nums is sorted in
        place, matching the original's side effect.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        nums.sort()
        result = []
        n = len(nums)
        for i in range(n - 2):
            # Skip duplicate anchor values to avoid repeated triplets.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total < 0:
                    lo += 1
                elif total > 0:
                    hi -= 1
                else:
                    result.append([nums[i], nums[lo], nums[hi]])
                    # Step both pointers past any duplicates.
                    while lo < hi and nums[lo] == nums[lo + 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi - 1]:
                        hi -= 1
                    lo += 1
                    hi -= 1
        return result

    def twoSum(self, nums, target):
        """Return [x, y] pairs (in input order) with x + y == target.

        May contain duplicate pairs when the input repeats values,
        matching the original behavior.
        """
        result = []
        for idx, x in enumerate(nums):
            if (target - x) in nums[idx + 1:]:
                result.append([x, target - x])
        return result
if __name__ == '__main__':
    # Use print() so the demo runs under both Python 2 and Python 3
    # (the original used a Python-2-only print statement).
    nums = [-1, 0, 1, 2, -1, -4]
    print(Solution().threeSum(nums))
ace9a2d68d04fbd83477cecf85c7b24cd142497a | 1,177 | py | Python | hddcoin/plotters/install_plotter.py | u4ma-hdd/hddcoin-blockchain | 4199d1f1d87e129ae9c08bf50dd48ec3b2c08727 | [
"Apache-2.0"
] | 37 | 2021-07-08T23:42:01.000Z | 2022-03-26T21:30:10.000Z | hddcoin/plotters/install_plotter.py | u4ma-hdd/hddcoin-blockchain | 4199d1f1d87e129ae9c08bf50dd48ec3b2c08727 | [
"Apache-2.0"
] | 13 | 2021-07-11T15:12:01.000Z | 2022-03-15T08:36:18.000Z | hddcoin/plotters/install_plotter.py | u4ma-hdd/hddcoin-blockchain | 4199d1f1d87e129ae9c08bf50dd48ec3b2c08727 | [
"Apache-2.0"
] | 19 | 2021-07-10T14:09:07.000Z | 2022-03-14T11:17:05.000Z | import os
from hddcoin.plotters.bladebit import install_bladebit
from hddcoin.plotters.madmax import install_madmax
def install_plotter(plotter, root_path):
if plotter == "chiapos":
print("Chiapos already installed. No action taken.")
return
elif plotter == "madmax":
if not os.path.exists(root_path / "madmax-plotter/build/hddcoin_plot"):
print("Installing madmax plotter.")
try:
install_madmax(root_path)
except Exception as e:
print(f"Exception while installing madmax plotter: {e}")
return
else:
print("Madmax plotter already installed.")
elif plotter == "bladebit":
if not os.path.exists(root_path / "bladebit/.bin/release/bladebit"):
print("Installing bladebit plotter.")
try:
install_bladebit(root_path)
except Exception as e:
print(f"Exception while installing bladebit plotter: {e}")
return
else:
print("Bladebit plotter already installed.")
else:
print("Unknown plotter. No action taken.")
return
| 35.666667 | 79 | 0.605777 |
ace9a3472fe7e703536aa714029d59ceb08c0a1b | 2,652 | py | Python | lib/scitools/easyviz/tests/test_legend.py | sammympie/scitools | 776c5bbfb0752ef20f242a8d0ecaa66d9141282c | [
"BSD-3-Clause"
] | 62 | 2015-03-28T18:07:51.000Z | 2022-02-12T20:32:36.000Z | lib/scitools/easyviz/tests/test_legend.py | sammympie/scitools | 776c5bbfb0752ef20f242a8d0ecaa66d9141282c | [
"BSD-3-Clause"
] | 7 | 2015-06-09T09:56:03.000Z | 2021-05-20T17:53:15.000Z | lib/scitools/easyviz/tests/test_legend.py | sammympie/scitools | 776c5bbfb0752ef20f242a8d0ecaa66d9141282c | [
"BSD-3-Clause"
] | 29 | 2015-04-16T03:48:57.000Z | 2022-02-03T22:06:52.000Z | """Test functions for legend command."""
from easyviztest import *
class test_legend_basic(EasyvizTestCase):
def check_legend_simple(self):
x,y,v,w = self.get_line_data()
plot(x,y)
legend('sin(x)*x')
title("plot(..);legend('sin(x)*x')")
n()
def check_legend_simple_kwarg(self):
x,y,v,w = self.get_line_data()
plot(x,y,legend='sin(x)*x')
title("plot(..,legend='sin(x)*x')")
n()
def check_legend_two_lines(self):
x,y,v,w = self.get_line_data()
plot(x,y,x,v)
legend('sin(x)*x','sin(x)*sqrt(x)')
title("plot(..);legend('sin(x)*x','sin(x)*sqrt(x)'")
n()
def check_legend_two_lines_kwarg(self):
x,y,v,w = self.get_line_data()
plot(x,y,x,v,legend=('sin(x)*x','sin(x)*sqrt(x)'))
title("plot(..,legend=('sin(x)*x','sin(x)*sqrt(x)')")
n()
def check_legend_two_lines_with_hold(self):
x,y,v,w = self.get_line_data()
setp(show=False)
plot(x,y,'r:')
legend('sin(x)*x')
hold('on')
plot(x,v,'b-o')
legend('sin(x)*sqrt(x)')
title("plot();legend();hold('on');plot();legend()")
hold('off')
setp(show=screenplot)
show()
n()
def check_legend_three_lines(self):
x,y,v,w = self.get_line_data()
plot(y,v,w,x=x)
legend('sin(x)*x','sin(x)*sqrt(x)','sin(x)*x**0.33333333')
title("legend('sin(x)*x','sin(x)*sqrt(x)','sin(x)*x**0.33333333')")
n()
def check_legend_three_lines_kwarg(self):
x,y,v,w = self.get_line_data()
plot(y,v,w,x=x,
legend=('sin(x)*x','sin(x)*sqrt(x)','sin(x)*x**0.33333333'))
title("plot(..,legend=('sin(x)*x','sin(x)*sqrt(x)','sin(x)*x**0.33333333'))")
n()
def check_legend_multiple_lines(self):
format = self.get_format_string_data()
m = len(format)
x = linspace(0,1,m)
setp(show=False)
for i in range(1,m+1):
y = linspace(i,m/2.0,m)
plot(x,y,format[i-1],hold='on')
legend('line%d' % i)
title("legends on multiple lines")
hold('off')
setp(show=screenplot)
show()
n()
def test_suite(level=1):
suites = []
if level > 0:
suites.append(unittest.makeSuite(test_legend_basic,'check_'))
total_suite = unittest.TestSuite(suites)
return total_suite
def test(level=10):
all_tests = test_suite()
runner = unittest.TextTestRunner()
runner.run(all_tests)
return runner
if __name__ == "__main__":
test()
raw_input("press enter to exit")
| 28.516129 | 85 | 0.542232 |
ace9a37a4f8241e36da45525d67f3d5b67cc1beb | 866 | py | Python | openprocurement/auctions/geb/managers/questioners.py | oleksiyVeretiuk/openprocurement.auctions.geb | 2965b52bf8826b9a8f8870c9a4d2052f945f5799 | [
"Apache-2.0"
] | null | null | null | openprocurement/auctions/geb/managers/questioners.py | oleksiyVeretiuk/openprocurement.auctions.geb | 2965b52bf8826b9a8f8870c9a4d2052f945f5799 | [
"Apache-2.0"
] | null | null | null | openprocurement/auctions/geb/managers/questioners.py | oleksiyVeretiuk/openprocurement.auctions.geb | 2965b52bf8826b9a8f8870c9a4d2052f945f5799 | [
"Apache-2.0"
] | null | null | null |
from zope.interface import implementer
from openprocurement.auctions.core.interfaces import (
IAuctionQuestioner
)
from openprocurement.auctions.geb.validation import (
validate_question_adding_period
)
@implementer(IAuctionQuestioner)
class AuctionQuestioner(object):
    """Adds a validated question from the request to the auction context."""
    name = 'Auction Questioner'
    # Validation hooks run in order against the incoming request.
    validators = [validate_question_adding_period]

    def __init__(self, request, context):
        self._request = request
        self._context = context

    def validate(self):
        """Run every validator; return True when all pass.

        Returns None (falsy) as soon as any validator rejects the request.
        """
        for validator in self.validators:
            if not validator(self._request):
                return
        return True

    def add_question(self):
        """Append the validated question and mark the auction modified.

        Returns the question on success, None when validation failed.
        """
        if self.validate():
            question = self._request.validated['question']
            self._context.questions.append(question)
            self._context.modified = True
            return question
| 25.470588 | 58 | 0.681293 |
ace9a398e35e1d1748a70dffe56c2e2fab9099ad | 738 | py | Python | resources/Rproducts.py | josexmercado/2clear | b35d0252002fd84af8d68b8e2dca051d4b1f492e | [
"MIT"
] | null | null | null | resources/Rproducts.py | josexmercado/2clear | b35d0252002fd84af8d68b8e2dca051d4b1f492e | [
"MIT"
] | 4 | 2020-06-25T05:51:48.000Z | 2021-09-29T17:17:19.000Z | resources/Rproducts.py | josexmercado/2clear | b35d0252002fd84af8d68b8e2dca051d4b1f492e | [
"MIT"
] | null | null | null | from flask_restful import Resource, reqparse
from models.Rproducts import Rproducts
class Registerrentalproducts(Resource):
    """POST endpoint that registers a new rental product."""

    def post(self):
        """Parse the rental-product fields from the request and persist them.

        All three fields are required; flask-restful aborts with a 400 and
        the given help text when one is missing.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('rproductname',
            type=str,
            required=True,
            help="This field cannot be left blank!"
        )
        # NOTE(review): price and quantity are parsed as str, not numbers --
        # confirm the Rproducts model expects strings.
        parser.add_argument('rprice',
            type=str,
            required=True,
            help="This field cannot be left blank!"
        )
        parser.add_argument('rquantity',
            type=str,
            required=True,
            help="This field cannot be left blank!"
        )
        data = parser.parse_args()

        new_rproducts = Rproducts(
            rproductname=data.rproductname,
            rprice=data.rprice,
            rquantity=data.rquantity
        )

        new_rproducts.insert()
        return {'message':'Product Registered!'}
ace9a435fa894b6cedd56b8ce10688f42bad459e | 7,546 | py | Python | WebHub/WebHub/user_agents.py | hugetiny/WebHubBot | 639274717a15231f1036443d91c018b8db988802 | [
"MIT"
] | 357 | 2020-04-15T00:24:00.000Z | 2022-03-30T11:18:43.000Z | PornHub/PornHub/user_agents.py | Flings/WebHubBot | 4ca388aeabad0e97c0f601ab0f773d4b1ee5039e | [
"MIT"
] | 5 | 2020-10-24T20:05:15.000Z | 2022-03-02T14:56:10.000Z | PornHub/PornHub/user_agents.py | Flings/WebHubBot | 4ca388aeabad0e97c0f601ab0f773d4b1ee5039e | [
"MIT"
] | 121 | 2020-04-21T11:48:13.000Z | 2022-03-21T12:04:52.000Z | # encoding=utf-8
agents = [
"Mozilla/5.0 (Linux; U; Android 2.3.6; en-us; Nexus S Build/GRK39F) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Avant Browser/1.2.789rel1 (http://www.avantbrowser.com)",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre",
"Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0 )",
"Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)",
"Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a",
"Mozilla/2.02E (Win95; U)",
"Mozilla/3.01Gold (Win95; I)",
"Mozilla/4.8 [en] (Windows NT 5.1; U)",
"Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)",
"HTC_Dream Mozilla/5.0 (Linux; U; Android 1.5; en-ca; Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; de-DE) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; sdk Build/CUPCAKE) AppleWebkit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; htc_bahamas Build/CRB17) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.1-update1; de-de; HTC Desire 1.19.161.5 Build/ERE27) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Sprint APA9292KT Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 1.5; de-ch; HTC Hero Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; ADR6300 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; HTC Legend Build/cupcake) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 1.5; de-de; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1 FirePHP/0.3",
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; HTC_TATTOO_A3288 Build/DRC79) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.0; en-us; dream) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; T-Mobile G1 Build/CRB43) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari 525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-gb; T-Mobile_G2_Touch Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Droid Build/FRG22D) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Milestone Build/ SHOLS_U2_01.03.1) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.0.1; de-de; Milestone Build/SHOLS_U2_01.14.0) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522 (KHTML, like Gecko) Safari/419.3",
"Mozilla/5.0 (Linux; U; Android 1.1; en-gb; dream) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Sprint APA9292KT Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; ADR6300 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-ca; GT-P1000M Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 3.0.1; fr-fr; A500 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 1.6; es-es; SonyEricssonX10i Build/R1FA016) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; SonyEricssonX10i Build/R1AA056) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
]
| 112.626866 | 164 | 0.685264 |
ace9a5231c5be93c102c6f8c4823f8e4fdf734a3 | 2,682 | py | Python | SCRAPE/Lib/site-packages/twisted/python/sendmsg.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | 4,612 | 2015-01-01T12:57:23.000Z | 2022-03-30T01:08:23.000Z | SCRAPE/Lib/site-packages/twisted/python/sendmsg.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | 1,243 | 2015-01-23T17:23:59.000Z | 2022-03-28T13:46:17.000Z | SCRAPE/Lib/site-packages/twisted/python/sendmsg.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | 1,236 | 2015-01-13T14:41:26.000Z | 2022-03-17T07:12:36.000Z | # -*- test-case-name: twisted.test.test_sendmsg -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
sendmsg(2) and recvmsg(2) support for Python.
"""
from collections import namedtuple
from socket import CMSG_SPACE, SCM_RIGHTS, socket as Socket
from typing import List, Tuple
__all__ = ["sendmsg", "recvmsg", "getSocketFamily", "SCM_RIGHTS"]
ReceivedMessage = namedtuple("ReceivedMessage", ["data", "ancillary", "flags"])
def sendmsg(
socket: Socket,
data: bytes,
ancillary: List[Tuple[int, int, bytes]] = [],
flags: int = 0,
) -> int:
"""
Send a message on a socket.
@param socket: The socket to send the message on.
@param data: Bytes to write to the socket.
@param ancillary: Extra data to send over the socket outside of the normal
datagram or stream mechanism. By default no ancillary data is sent.
@param flags: Flags to affect how the message is sent. See the C{MSG_}
constants in the sendmsg(2) manual page. By default no flags are set.
@return: The return value of the underlying syscall, if it succeeds.
"""
return socket.sendmsg([data], ancillary, flags)
def recvmsg(
socket: Socket, maxSize: int = 8192, cmsgSize: int = 4096, flags: int = 0
) -> ReceivedMessage:
"""
Receive a message on a socket.
@param socket: The socket to receive the message on.
@param maxSize: The maximum number of bytes to receive from the socket using
the datagram or stream mechanism. The default maximum is 8192.
@param cmsgSize: The maximum number of bytes to receive from the socket
outside of the normal datagram or stream mechanism. The default maximum
is 4096.
@param flags: Flags to affect how the message is sent. See the C{MSG_}
constants in the sendmsg(2) manual page. By default no flags are set.
@return: A named 3-tuple of the bytes received using the datagram/stream
mechanism, a L{list} of L{tuple}s giving ancillary received data, and
flags as an L{int} describing the data received.
"""
# In Twisted's _sendmsg.c, the csmg_space was defined as:
# int cmsg_size = 4096;
# cmsg_space = CMSG_SPACE(cmsg_size);
# Since the default in Python 3's socket is 0, we need to define our
# own default of 4096. -hawkie
data, ancillary, flags = socket.recvmsg(maxSize, CMSG_SPACE(cmsgSize), flags)[0:3]
return ReceivedMessage(data=data, ancillary=ancillary, flags=flags)
def getSocketFamily(socket: Socket) -> int:
"""
Return the family of the given socket.
@param socket: The socket to get the family of.
"""
return socket.family
| 34.831169 | 86 | 0.687919 |
ace9a52cc78986f9c45cb1b5f5d984186fe6e364 | 14,158 | py | Python | keras_/kerascv/models/shufflenetv2.py | huangwenwenlili/imgclsmob | 1505fd61acbed429773f5c7ce286c858fc2278b8 | [
"MIT"
] | 1 | 2021-01-08T04:55:45.000Z | 2021-01-08T04:55:45.000Z | keras_/kerascv/models/shufflenetv2.py | huangwenwenlili/imgclsmob | 1505fd61acbed429773f5c7ce286c858fc2278b8 | [
"MIT"
] | null | null | null | keras_/kerascv/models/shufflenetv2.py | huangwenwenlili/imgclsmob | 1505fd61acbed429773f5c7ce286c858fc2278b8 | [
"MIT"
] | null | null | null | """
ShuffleNet V2, implemented in Keras.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['shufflenetv2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2']
import os
from keras import backend as K
from keras import layers as nn
from keras.models import Model
from .common import conv2d, conv1x1, max_pool2d_ceil, channel_shuffle_lambda, se_block, GluonBatchNormalization
def shuffle_conv(x,
in_channels,
out_channels,
kernel_size,
strides,
padding,
name="shuffle_conv"):
"""
ShuffleNetV2 specific convolution block.
Parameters:
----------
x : keras.backend tensor/variable/symbol
Input tensor/variable/symbol.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
name : str, default 'shuffle_conv'
Block name.
Returns
-------
keras.backend tensor/variable/symbol
Resulted tensor and preactivated input tensor.
"""
x = conv2d(
x=x,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=False,
name=name + "/conv")
x = GluonBatchNormalization(name=name + "/bn")(x)
x = nn.Activation("relu", name=name + "/activ")(x)
return x
def shuffle_conv1x1(x,
in_channels,
out_channels,
name="shuffle_conv1x1"):
"""
1x1 version of the ShuffleNetV2 specific convolution block.
Parameters:
----------
x : keras.backend tensor/variable/symbol
Input tensor/variable/symbol.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
name : str, default 'shuffle_conv1x1'
Block name.
Returns
-------
keras.backend tensor/variable/symbol
Resulted tensor/variable/symbol.
"""
return shuffle_conv(
x=x,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=1,
padding=0,
name=name)
def depthwise_conv3x3(x,
channels,
strides,
name="depthwise_conv3x3"):
"""
Depthwise convolution 3x3 layer.
Parameters:
----------
x : keras.backend tensor/variable/symbol
Input tensor/variable/symbol.
channels : int
Number of input/output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
name : str, default 'depthwise_conv3x3'
Block name.
Returns
-------
keras.backend tensor/variable/symbol
Resulted tensor/variable/symbol.
"""
return conv2d(
x=x,
in_channels=channels,
out_channels=channels,
kernel_size=3,
strides=strides,
padding=1,
groups=channels,
use_bias=False,
name=name)
def shuffle_unit(x,
in_channels,
out_channels,
downsample,
use_se,
use_residual,
name="shuffle_unit"):
"""
ShuffleNetV2 unit.
Parameters:
----------
x : keras.backend tensor/variable/symbol
Input tensor/variable/symbol.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
downsample : bool
Whether do downsample.
use_se : bool
Whether to use SE block.
use_residual : bool
Whether to use residual connection.
name : str, default 'shuffle_unit'
Unit name.
Returns
-------
keras.backend tensor/variable/symbol
Resulted tensor/variable/symbol.
"""
mid_channels = out_channels // 2
if downsample:
y1 = depthwise_conv3x3(
x=x,
channels=in_channels,
strides=2,
name=name + "/dw_conv4")
y1 = GluonBatchNormalization(name=name + "/dw_bn4")(y1)
y1 = conv1x1(
out_channels=mid_channels,
name=name + "/expand_conv5")(y1)
y1 = GluonBatchNormalization(name=name + "/expand_bn5")(y1)
y1 = nn.Activation("relu", name=name + "/expand_activ5")(y1)
x2 = x
else:
in_split2_channels = in_channels // 2
y1 = nn.Lambda(lambda z: z[:, 0:in_split2_channels, :, :])(x)
x2 = nn.Lambda(lambda z: z[:, in_split2_channels:, :, :])(x)
y2 = conv1x1(
out_channels=mid_channels,
name=name + "/compress_conv1")(x2)
y2 = GluonBatchNormalization(name=name + "/compress_bn1")(y2)
y2 = nn.Activation("relu", name=name + "/compress_activ1")(y2)
y2 = depthwise_conv3x3(
x=y2,
channels=mid_channels,
strides=(2 if downsample else 1),
name=name + "/dw_conv2")
y2 = GluonBatchNormalization(name=name + "/dw_bn2")(y2)
y2 = conv1x1(
out_channels=mid_channels,
name=name + "/expand_conv3")(y2)
y2 = GluonBatchNormalization(name=name + "/expand_bn3")(y2)
y2 = nn.Activation("relu", name=name + "/expand_activ3")(y2)
if use_se:
y2 = se_block(
x=y2,
channels=mid_channels,
name=name + "/se")
if use_residual and not downsample:
y2 = nn.add([y2, x2], name=name + "/add")
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
x = nn.concatenate([y1, y2], axis=channel_axis, name=name + "/concat")
x = channel_shuffle_lambda(
channels=out_channels,
groups=2,
name=name + "/c_shuffle")(x)
return x
def shuffle_init_block(x,
in_channels,
out_channels,
name="shuffle_init_block"):
"""
ShuffleNetV2 specific initial block.
Parameters:
----------
x : keras.backend tensor/variable/symbol
Input tensor/variable/symbol.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
name : str, default 'shuffle_init_block'
Block name.
Returns
-------
keras.backend tensor/variable/symbol
Resulted tensor/variable/symbol.
"""
x = shuffle_conv(
x=x,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=2,
padding=1,
name=name + "/conv")
x = max_pool2d_ceil(
x=x,
pool_size=3,
strides=2,
padding="valid",
name=name + "/pool")
return x
def shufflenetv2(channels,
init_block_channels,
final_block_channels,
use_se=False,
use_residual=False,
in_channels=3,
in_size=(224, 224),
classes=1000):
"""
ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
use_se : bool, default False
Whether to use SE block.
use_residual : bool, default False
Whether to use residual connections.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
input_shape = (in_channels, 224, 224) if K.image_data_format() == 'channels_first' else (224, 224, in_channels)
input = nn.Input(shape=input_shape)
x = shuffle_init_block(
x=input,
in_channels=in_channels,
out_channels=init_block_channels,
name="features/init_block")
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
for j, out_channels in enumerate(channels_per_stage):
downsample = (j == 0)
x = shuffle_unit(
x=x,
in_channels=in_channels,
out_channels=out_channels,
downsample=downsample,
use_se=use_se,
use_residual=use_residual,
name="features/stage{}/unit{}".format(i + 1, j + 1))
in_channels = out_channels
x = shuffle_conv1x1(
x=x,
in_channels=in_channels,
out_channels=final_block_channels,
name="features/final_block")
in_channels = final_block_channels
x = nn.AvgPool2D(
pool_size=7,
strides=1,
name="features/final_pool")(x)
x = nn.Flatten()(x)
x = nn.Dense(
units=classes,
input_dim=in_channels,
name="output")(x)
model = Model(inputs=input, outputs=x)
model.in_size = in_size
model.classes = classes
return model
def get_shufflenetv2(width_scale,
model_name=None,
pretrained=False,
root=os.path.join('~', '.keras', 'models'),
**kwargs):
"""
Create ShuffleNetV2 model with specific parameters.
Parameters:
----------
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
init_block_channels = 24
final_block_channels = 1024
layers = [4, 8, 4]
channels_per_layers = [116, 232, 464]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
if width_scale > 1.5:
final_block_channels = int(final_block_channels * width_scale)
net = shufflenetv2(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def shufflenetv2_wd2(**kwargs):
"""
ShuffleNetV2 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(12.0 / 29.0), model_name="shufflenetv2_wd2", **kwargs)
def shufflenetv2_w1(**kwargs):
"""
ShuffleNetV2 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=1.0, model_name="shufflenetv2_w1", **kwargs)
def shufflenetv2_w3d2(**kwargs):
"""
ShuffleNetV2 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(44.0 / 29.0), model_name="shufflenetv2_w3d2", **kwargs)
def shufflenetv2_w2(**kwargs):
"""
ShuffleNetV2 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(61.0 / 29.0), model_name="shufflenetv2_w2", **kwargs)
def _test():
import numpy as np
import keras
pretrained = False
models = [
shufflenetv2_wd2,
shufflenetv2_w1,
shufflenetv2_w3d2,
shufflenetv2_w2,
]
for model in models:
net = model(pretrained=pretrained)
# net.summary()
weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != shufflenetv2_wd2 or weight_count == 1366792)
assert (model != shufflenetv2_w1 or weight_count == 2278604)
assert (model != shufflenetv2_w3d2 or weight_count == 4406098)
assert (model != shufflenetv2_w2 or weight_count == 7601686)
x = np.zeros((1, 3, 224, 224), np.float32)
y = net.predict(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 29.681342 | 115 | 0.6063 |
ace9a5b856f4571d0d5955c747ba2ec19325315a | 6,791 | py | Python | qiskit/validation/fields/custom.py | IsidoreCarinae/qiskit-terra | d98540b0868f8b0a2989c0bfd4e184d770c5a01d | [
"Apache-2.0"
] | null | null | null | qiskit/validation/fields/custom.py | IsidoreCarinae/qiskit-terra | d98540b0868f8b0a2989c0bfd4e184d770c5a01d | [
"Apache-2.0"
] | null | null | null | qiskit/validation/fields/custom.py | IsidoreCarinae/qiskit-terra | d98540b0868f8b0a2989c0bfd4e184d770c5a01d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Fields custom to Terra to be used with Qiskit validated classes."""
import numpy
import sympy
from marshmallow.utils import is_collection
from marshmallow.exceptions import ValidationError
from marshmallow.compat import Mapping
from qiskit.validation import ModelTypeValidator
class Complex(ModelTypeValidator):
"""Field for complex numbers.
Field for parsing complex numbers:
* deserializes to Python's `complex`.
* serializes to a tuple of 2 decimals `(real, imaginary)`
"""
valid_types = (complex, )
default_error_messages = {
'invalid': '{input} cannot be parsed as a complex number.',
'format': '"{input}" cannot be formatted as complex number.',
}
def _serialize(self, value, attr, obj):
try:
return [value.real, value.imag]
except AttributeError:
self.fail('format', input=value)
def _deserialize(self, value, attr, data):
if not is_collection(value) or len(value) != 2:
self.fail('invalid', input=value)
try:
return complex(*value)
except (ValueError, TypeError):
self.fail('invalid', input=value)
class InstructionParameter(ModelTypeValidator):
"""Field for objects used in instruction parameters.
This field provides support for parsing objects of types that uses by
qobj.experiments.instructions.parameters:
* basic Python types: complex, int, float, str, list
* ``numpy``: integer, float, ndarray
* ``sympy``: Symbol, Basic
Note that by using this field, serialization-deserialization round-tripping
becomes not possible, as certain types serialize to the same Python basic
type (for example, numpy.float and regular float). If possible, it is
recommended that more specific and defined fields are used instead.
"""
valid_types = (complex, int, float, str,
numpy.integer, numpy.float, sympy.Basic, sympy.Symbol,
list, numpy.ndarray)
default_error_messages = {
'invalid': '{input} cannot be parsed as a parameter.',
'format': '"{input}" cannot be formatted as a parameter.'
}
def _serialize(self, value, attr, obj):
# pylint: disable=too-many-return-statements
if is_collection(value):
return [self._serialize(item, attr, obj) for item in value]
if isinstance(value, complex):
return [value.real, value.imag]
if isinstance(value, numpy.integer):
return int(value)
if isinstance(value, numpy.float):
return float(value)
if isinstance(value, (float, int, str)):
return value
if isinstance(value, sympy.Symbol):
return str(value)
if isinstance(value, sympy.Basic):
if sympy.im(value) != 0:
return [float(sympy.re(value)), float(sympy.im(value))]
if value.is_Integer:
return int(value.evalf())
else:
return float(value.evalf())
# Fallback for attempting serialization.
if hasattr(value, 'to_dict'):
return value.to_dict()
return self.fail('format', input=value)
def _deserialize(self, value, attr, data):
if is_collection(value):
return [self._deserialize(item, attr, data) for item in value]
if isinstance(value, (float, int, str)):
return value
return self.fail('invalid', input=value)
def check_type(self, value, attr, data):
"""Customize check_type for handling containers."""
# Check the type in the standard way first, in order to fail quickly
# in case of invalid values.
root_value = super().check_type(
value, attr, data)
if is_collection(value):
_ = [super(InstructionParameter, self).check_type(item, attr, data)
for item in value]
return root_value
class DictParameters(ModelTypeValidator):
"""Field for objects used in measurement kernel and discriminator parameters.
"""
default_error_messages = {
'invalid_mapping': 'Not a valid mapping type.',
'invalid': '{input} cannot be parsed as a parameter.'
}
def __init__(self, valid_value_types, **kwargs):
"""Create new model.
Args:
valid_value_types (tuple): valid types as values.
"""
# pylint: disable=missing-param-doc
super().__init__(**kwargs)
self.valid_value_types = valid_value_types
def _expected_types(self):
return self.valid_value_types
def check_type(self, value, attr, data):
if value is None:
return None
_check_type = super().check_type
errors = []
if not isinstance(data[attr], Mapping):
self.fail('invalid_mapping')
try:
if isinstance(value, Mapping):
for v in value.values():
self.check_type(v, attr, data)
elif is_collection(value):
for v in value:
self.check_type(v, attr, data)
else:
_check_type(value, attr, data)
except ValidationError as err:
errors.append(err.messages)
if errors:
raise ValidationError(errors)
return value
def _validate_values(self, value):
if value is None:
return None
if isinstance(value, self.valid_value_types):
return value
if is_collection(value):
return [self._validate_values(each) for each in value]
if isinstance(value, Mapping):
return {str(k): self._validate_values(v) for k, v in value.items()}
return self.fail('invalid', input=value)
def _serialize(self, value, attr, obj):
if value is None:
return None
if isinstance(value, Mapping):
return {str(k): self._validate_values(v) for k, v in value.items()}
return self.fail('invalid_mapping')
def _deserialize(self, value, attr, data):
if value is None:
return None
if isinstance(value, Mapping):
return value
return self.fail('invalid_mapping')
| 32.338095 | 81 | 0.622441 |
ace9a5fdf829e56eeba4fccd384ff0ea1a64d2a3 | 3,142 | py | Python | tests/extension/thread_/uart/thread_uart.py | akmaru/veriloggen | 74f998139e8cf613f7703fa4cffd571bbf069bbc | [
"Apache-2.0"
] | 232 | 2015-09-01T16:07:48.000Z | 2022-03-28T14:53:28.000Z | tests/extension/thread_/uart/thread_uart.py | akmaru/veriloggen | 74f998139e8cf613f7703fa4cffd571bbf069bbc | [
"Apache-2.0"
] | 34 | 2015-08-21T09:13:03.000Z | 2022-03-21T23:52:44.000Z | tests/extension/thread_/uart/thread_uart.py | akmaru/veriloggen | 74f998139e8cf613f7703fa4cffd571bbf069bbc | [
"Apache-2.0"
] | 46 | 2015-09-24T14:39:57.000Z | 2022-02-23T21:59:56.000Z | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import math
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
from veriloggen.thread.uart import UartTx, UartRx
def mkLed(baudrate=19200, clockfreq=100 * 1000 * 1000):
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
sw = m.Input('sw', 16)
led = m.OutputReg('led', 16, initval=0)
tx = m.Output('utx')
rx = m.Input('urx')
uart_tx = UartTx(m, 'inst_tx', 'tx_', clk, rst, tx,
baudrate=baudrate, clockfreq=clockfreq)
uart_rx = UartRx(m, 'inst_rx', 'rx_', clk, rst, rx,
baudrate=baudrate, clockfreq=clockfreq)
def blink():
while True:
c = uart_rx.recv()
data = c + sw
led.value = data
uart_tx.send(data)
th = vthread.Thread(m, 'th_blink', clk, rst, blink)
fsm = th.start()
return m
def mkTest(baudrate=19200, clockfreq=19200 * 10):
m = Module('test')
# target instance
led = mkLed(baudrate, clockfreq)
uut = Submodule(m, led, name='uut', prefix='', as_wire=('utx', 'urx'))
clk = uut['CLK']
rst = uut['RST']
tx = uut['utx']
rx = uut['urx']
sw = uut['sw']
uart_tx = UartTx(m, 'inst_tx', 'tx_', clk, rst, as_wire='txd',
baudrate=baudrate, clockfreq=clockfreq)
uart_rx = UartRx(m, 'inst_rx', 'rx_', clk, rst, as_wire='rxd',
baudrate=baudrate, clockfreq=clockfreq)
txd = uart_tx['txd']
rxd = uart_rx['rxd']
rx.assign(txd)
rxd.assign(tx)
#simulation.setup_waveform(m, uut, uart_tx, uart_rx)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
sw(10),
Delay(1000000),
Systask('finish')
)
all_ok = m.TmpReg(initval=0)
def test():
all_ok = True
for i in range(10):
s = 100 + i
uart_tx.send(s)
r = uart_rx.recv()
if vthread.verilog.Eql(r, s + sw):
print('OK: %d + %d === %d' % (s, sw, r))
else:
print('NG: %d + %d !== %d' % (s, sw, r))
all_ok = False
if all_ok:
print('# verify: PASSED')
else:
print('# verify: FAILED')
vthread.finish()
th = vthread.Thread(m, 'test', clk, rst, test)
th.start()
return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
test = mkTest()
if filename is not None:
test.to_verilog(filename)
sim = simulation.Simulator(test, sim=simtype)
rslt = sim.run(outputfile=outputfile)
lines = rslt.splitlines()
if simtype == 'verilator' and lines[-1].startswith('-'):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
| 25.966942 | 74 | 0.567791 |
ace9a66099f61edc1506bffaa6ca064e6736f914 | 26,689 | py | Python | genbank_get_genomes_by_taxon_cds.py | peterthorpe5/Nanopore | 4e9092b7d01580a9499b262ce0c8d86c756a6249 | [
"MIT"
] | null | null | null | genbank_get_genomes_by_taxon_cds.py | peterthorpe5/Nanopore | 4e9092b7d01580a9499b262ce0c8d86c756a6249 | [
"MIT"
] | null | null | null | genbank_get_genomes_by_taxon_cds.py | peterthorpe5/Nanopore | 4e9092b7d01580a9499b262ce0c8d86c756a6249 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# genbank_get_genomes_by_taxon.py
#
# Copyright 2015-2016, The James Hutton Institute
# Author: Leighton Pritchard
#
# This code is part of the pyani package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of
# this package.
"""A script to download sequence/class/label data from NCBI
This script takes an NCBI taxonomy identifier (or string, though this is
not always reliable for taxonomy tree subgraphs...) and downloads all genomes
it can find from NCBI in the corresponding taxon subgraph that has
the passed argument as root.
"""
import logging
import logging.handlers
import os
import re
import shutil
import subprocess
import sys
import time
import traceback
from argparse import ArgumentParser
from collections import defaultdict
from socket import timeout
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
from Bio import Entrez, SeqIO
class NCBIDownloadException(Exception):
    """General exception raised when a download from NCBI fails."""

    def __init__(self):
        # Idiom fix: use super() instead of calling Exception.__init__
        # directly; the message carried by the exception is unchanged.
        super().__init__("Error downloading file from NCBI")
# Parse command-line
def parse_cmdline(argv=None):
    """Parse command-line arguments and return the populated namespace.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse. Defaults to ``None``, in which case
        ``sys.argv[1:]`` is used (normal command-line behaviour); passing
        a list explicitly is useful for testing.

    Returns
    -------
    argparse.Namespace
        Parsed options (outdirname, taxon, verbose, force, noclobber,
        logfile, format, email, retries, batchsize, timeout).
    """
    parser = ArgumentParser(prog="genbank_get_genomes_by_taxon.py")
    parser.add_argument("-o", "--outdir", dest="outdirname",
                        action="store", default=None,
                        help="Output directory")
    parser.add_argument("-t", "--taxon", dest="taxon",
                        action="store", default=None,
                        help="NCBI taxonomy ID")
    parser.add_argument("-v", "--verbose", dest="verbose",
                        action="store_true", default=False,
                        help="Give verbose output")
    parser.add_argument("-f", "--force", dest="force",
                        action="store_true", default=False,
                        help="Force file overwriting")
    parser.add_argument("--noclobber", dest="noclobber",
                        action="store_true", default=False,
                        help="Don't nuke existing files")
    parser.add_argument("-l", "--logfile", dest="logfile",
                        action="store", default=None,
                        help="Logfile location")
    # BUG FIX: the default previously contained a stray space
    # ("gbk,fasta, gff"), which would produce a " gff" entry when the
    # value is split on commas.
    parser.add_argument("--format", dest="format",
                        action="store", default="gbk,fasta,gff",
                        help="Output file format [gbk|fasta|gff]")
    parser.add_argument("--email", dest="email",
                        action="store", default=None,
                        help="Email associated with NCBI queries")
    # BUG FIX: numeric options now use type=int.  Previously a value
    # supplied on the command line arrived as a string, breaking
    # comparisons such as `tries < args.retries` elsewhere in the script.
    parser.add_argument("--retries", dest="retries",
                        action="store", type=int, default=20,
                        help="Number of Entrez retry attempts per request.")
    parser.add_argument("--batchsize", dest="batchsize",
                        action="store", type=int, default=10000,
                        help="Entrez record return batch size")
    parser.add_argument("--timeout", dest="timeout",
                        action="store", type=int, default=10,
                        help="Timeout for URL connection (s)")
    return parser.parse_args(argv)
# Report last exception as string
def last_exception():
    """Return the most recent exception's traceback as a string.

    Intended to be called from inside an ``except`` block, typically to
    pass the formatted traceback to the logger.
    """
    return traceback.format_exc()
# Set contact email for NCBI
def set_ncbi_email():
    """Set contact email and tool name for NCBI Entrez queries.

    Reads the address from the module-global ``args.email`` (populated by
    ``parse_cmdline``) and records it on ``Bio.Entrez`` so that NCBI can
    contact the user about excessive/problematic requests.
    """
    Entrez.email = args.email
    logger.info("Set NCBI contact email to %s", args.email)
    # Identify this script to NCBI as the querying tool.
    Entrez.tool = "genbank_get_genomes_by_taxon.py"
# Create output directory if it doesn't exist
def make_outdir():
    """Make the output directory, if required.

    This is a little involved. If the output directory already exists,
    we take the safe option by default, and stop with an error. We can,
    however, choose to force the program to go on, in which case we can
    either clobber the existing directory, or not. The options turn out
    as the following, if the directory exists:

    DEFAULT: stop and report the collision
    FORCE: continue, and remove the existing output directory
    NOCLOBBER+FORCE: continue, but do not remove the existing output
    """
    if os.path.exists(args.outdirname):
        if not args.force:
            logger.error("Output directory %s would overwrite existing " +
                         "files (exiting)", args.outdirname)
            sys.exit(1)
        else:
            logger.info("--force output directory use")
            if args.noclobber:
                logger.warning("--noclobber: existing output directory kept")
            else:
                logger.info("Removing directory %s and everything below it",
                            args.outdirname)
                shutil.rmtree(args.outdirname)
    logger.info("Creating directory %s", args.outdirname)
    try:
        os.makedirs(args.outdirname)  # We make the directory recursively
    except OSError:
        # This gets thrown if the directory exists. If we've forced overwrite/
        # delete and we're not clobbering, we let things slide
        if args.noclobber and args.force:
            logger.info("NOCLOBBER+FORCE: not creating directory")
        else:
            # BUG FIX: last_exception is a function and must be called;
            # previously the function object itself was passed to the
            # logger, so no traceback text was ever recorded.
            logger.error(last_exception())
            sys.exit(1)
# Retry Entrez requests (or any other function)
def entrez_retry(func, *fnargs, **fnkwargs):
    """Call func(*fnargs, **fnkwargs), retrying up to args.retries times.

    Retries only on transient network failures (HTTPError/URLError);
    any other exception propagates. If every attempt fails, the error
    is logged and the program exits.

    Returns whatever `func` returns on the first successful call.
    """
    tries, success = 0, False
    while not success and tries < args.retries:
        try:
            output = func(*fnargs, **fnkwargs)
            success = True
        except (HTTPError, URLError):
            tries += 1
            # BUG FIX: report the attempt just made (`tries`), not
            # `tries + 1` -- the counter was already incremented above,
            # so the old message over-counted by one (first failure was
            # reported as 2/N).
            logger.warning("Entrez query %s(%s, %s) failed (%d/%d)",
                           func, fnargs, fnkwargs, tries, args.retries)
            logger.warning(last_exception())
    if not success:
        logger.error("Too many Entrez failures (exiting)")
        sys.exit(1)
    return output
# Get results from NCBI web history, in batches
def entrez_batch_webhistory(record, expected, batchsize, *fnargs, **fnkwargs):
    """Fetch all results of a prior Entrez webhistory search via Efetch.

    Results are recovered in batches of `batchsize` (Efetch caps how many
    records a single request may return) and concatenated into one list.

    - record: Entrez webhistory record
    - expected: number of expected search returns
    - batchsize: how many search returns to retrieve in a batch
    - *fnargs: arguments to Efetch
    - **fnkwargs: keyword arguments to Efetch
    """
    results = []
    for offset in range(0, expected, batchsize):
        handle = entrez_retry(Entrez.efetch,
                              retstart=offset, retmax=batchsize,
                              webenv=record["WebEnv"],
                              query_key=record["QueryKey"],
                              *fnargs, **fnkwargs)
        results.extend(Entrez.read(handle, validate=False))
    return results
# Get assembly UIDs for the root taxon
def get_asm_uids(taxon_uid):
    """Return NCBI assembly UIDs for the subtree rooted at taxon_uid.

    Runs an Entrez ESearch (db=assembly) using NCBI web history, then
    recovers the full UID list from that history in batches of 250.
    """
    query = "txid%s[Organism:exp]" % taxon_uid
    logger.info("Entrez ESearch with query: %s", query)
    # History-backed search; the initial response just tells us how many
    # hits there are, the UIDs themselves come back via the web history.
    search_record = Entrez.read(
        entrez_retry(Entrez.esearch, db="assembly", term=query,
                     format="xml", usehistory="y"),
        validate=False)
    hit_count = int(search_record['Count'])
    logger.info("Entrez ESearch returns %d assembly IDs", hit_count)
    asm_ids = entrez_batch_webhistory(search_record, hit_count, 250,
                                      db="assembly", retmode="xml")
    logger.info("Identified %d unique assemblies", len(asm_ids))
    return asm_ids
# Extract filestem from Entrez eSummary
def extract_filestem(data):
    """Build the download filestem <AssemblyAccession>_<escaped name>.

    Expects esummary['DocumentSummarySet']['DocumentSummary'][0].
    NCBI replaces whitespace, '/' and ',' in AssemblyName with
    underscores (rather than using standard percent escapes), so the
    same substitution is mirrored here.
    """
    escaped_name = re.sub(r"[\s/,]", '_', data['AssemblyName'])
    return "{0}_{1}".format(data['AssemblyAccession'], escaped_name)
# Download NCBI assembly file for a passed Assembly UID
def get_ncbi_asm(asm_uid):
    """Returns the NCBI AssemblyAccession and AssemblyName for the assembly
    with passed UID, and organism data for class/label files also, as well
    as accession, so we can track whether downloads fail because only the
    most recent version is available..
    AssemblyAccession and AssemblyName are data fields in the eSummary record,
    and correspond to downloadable files for each assembly at
    ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GC[AF]/nnn/nnn/nnn/<AA>_<AN>
    where <AA> is AssemblyAccession, and <AN> is AssemblyName, and the choice
    of GCA vs GCF, and the three values of nnn are taken from <AA>

    Returns a 4-tuple (fastafilename, classtxt, labeltxt, accession);
    fastafilename is None when the download failed for both the RefSeq
    (GCF) and the GenBank (GCA) fallback accession.
    """
    logger.info("Identifying assembly information from NCBI for %s",
                asm_uid)
    # Obtain full eSummary data for the assembly
    summary = Entrez.read(entrez_retry(Entrez.esummary, db="assembly",
                                       id=asm_uid, report="full"),
                          validate=False)
    # Extract filestem from assembly data
    data = summary['DocumentSummarySet']['DocumentSummary'][0]
    filestem = extract_filestem(data)
    # Report interesting things from the summary for those interested
    logger.info("\tOrganism: %s", data['Organism'])
    logger.info("\tTaxid: %s", data['SpeciesTaxid'])
    logger.info("\tAccession: %s", data['AssemblyAccession'])
    logger.info("\tName: %s", data['AssemblyName'])
    # NOTE: Maybe parse out the assembly stats here, in future?
    # Get class and label text
    organism = data['SpeciesName']
    try:
        strain = data['Biosource']['InfraspeciesList'][0]['Sub_value']
    except (KeyError, IndexError):
        # we consider this an error/incompleteness in the NCBI metadata
        strain = ""
    # Create label and class strings
    # labeltxt: "<stem>_genomic\tG. species strain"; classtxt: "<stem>_genomic\torganism"
    genus, species = organism.split(' ', 1)
    labeltxt = "%s_genomic\t%s %s %s" % (filestem, genus[0] + '.',
                                         species, strain)
    classtxt = "%s_genomic\t%s" % (filestem, organism)
    logger.info("\tLabel: %s", labeltxt)
    logger.info("\tClass: %s", classtxt)
    # Download and extract genome assembly
    try:
        fastafilename = retrieve_asm_contigs(filestem)
    except NCBIDownloadException:
        # This is a little hacky. Sometimes, RefSeq assemblies are
        # suppressed (presumably because they are non-redundant),
        # but the GenBank assembly persists. In those cases, we
        # *assume* (because it may not be true) that the corresponding
        # genbank sequence shares the same accession number, except
        # that GCF is replaced by GCA
        gbfilestem = re.sub('^GCF_', 'GCA_', filestem)
        logger.warning("Could not download %s, trying %s",
                       filestem, gbfilestem)
        try:
            fastafilename = retrieve_asm_contigs(gbfilestem)
        except NCBIDownloadException:
            # Both RefSeq and GenBank fallback failed: signal with None.
            fastafilename = None
    return (fastafilename, classtxt, labeltxt, data['AssemblyAccession'])
# Download and extract an NCBI assembly file, given a filestem
def retrieve_asm_contigs(filestem,
                         ftpstem="ftp://ftp.ncbi.nlm.nih.gov/genomes/all",
                         suffix="cds_from_genomic.fna.gz"):
    """Downloads an assembly sequence to a local directory.

    The filestem corresponds to <AA>_<AN>, where <AA> and <AN> are
    AssemblyAccession and AssemblyName: data fields in the eSummary record.
    These correspond to downloadable files for each assembly at
    ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GC[AF]/nnn/nnn/nnn/<AA>_<AN>/
    where <AA> is AssemblyAccession, and <AN> is AssemblyName. The choice
    of GCA vs GCF, and the values of nnn, are derived from <AA>

    The files in this directory all have the stem <AA>_<AN>_<suffix>, where
    suffixes are:
    assembly_report.txt
    assembly_stats.txt
    feature_table.txt.gz
    genomic.fna.gz
    genomic.gbff.gz
    genomic.gff.gz
    protein.faa.gz
    protein.gpff.gz
    rm_out.gz
    rm.run
    wgsmaster.gbff.gz

    This function downloads the file named by `suffix` (by default the
    CDS-from-genomic FASTA), and extracts it in the output directory
    name specified when the script is called. Raises
    NCBIDownloadException on any download or extraction failure; returns
    the path of the extracted file.
    """
    logger.info("Retrieving assembly sequence for %s", filestem)
    # Compile URL
    gc, aa, an = tuple(filestem.split('_', 2))
    aaval = aa.split('.')[0]
    # NOTE(review): the slice is taken from `aa` while the range uses
    # len(aaval); identical for standard 9-digit accessions (the ".N"
    # version suffix lies beyond index 9), but slicing `aaval` would be
    # the consistent form -- confirm before changing.
    subdirs = '/'.join([aa[i:i+3] for i in range(0, len(aaval), 3)])
    url = "{0}/{1}/{2}/{3}/{3}_{4}".format(ftpstem, gc, subdirs,
                                           filestem, suffix)
    logger.info("Using URL: %s", url)
    # Get data info
    try:
        response = urlopen(url, timeout=args.timeout)
    except (HTTPError, URLError):
        logger.error("Download failed for URL: %s\n%s",
                     url, last_exception())
        raise NCBIDownloadException()
    except timeout:
        logger.error("Download timed out for URL: %s\n%s",
                     url, last_exception())
        raise NCBIDownloadException()
    else:
        fsize = int(response.info().get("Content-length"))
        logger.info("Opened URL and parsed metadata.")
    # Download data
    outfname = os.path.join(args.outdirname, '_'.join([filestem, suffix]))
    if os.path.exists(outfname):
        logger.warning("Output file %s exists, not downloading", outfname)
    else:
        logger.info("Downloading %s (%d bytes)", url, fsize)
        bsize = 1048576  # buffer size
        fsize_dl = 0  # bytes downloaded
        # NOTE(review): the bare `except:` below also swallows
        # KeyboardInterrupt/SystemExit; consider narrowing to Exception.
        try:
            with open(outfname, "wb") as ofh:
                while True:
                    buffer = response.read(bsize)
                    if not buffer:
                        break
                    fsize_dl += len(buffer)
                    ofh.write(buffer)
                    status = r"%10d [%3.2f%%]" % (fsize_dl,
                                                  fsize_dl * 100. / fsize)
                    logger.info(status)
        except:
            logger.error("Download failed for %s", url)
            logger.error(last_exception())
            raise NCBIDownloadException()
    # Extract data
    ename = os.path.splitext(outfname)[0]  # Strips only .gz from filename
    # The code below would munge the extracted filename to suit the expected
    # class/label from the old version of this script.
    # The .gz file downloaded from NCBI has format
    # <assembly UID>_<string>_genomic.fna.gz - which we would extract to
    # <assembly UID>.fna
    #regex = ".{3}_[0-9]{9}.[0-9]"
    #outparts = os.path.split(outfname)
    #print(outparts[0])
    #print(re.match(regex, outparts[-1]).group())
    #ename = os.path.join(outparts[0],
    #                     re.match(regex, outparts[-1]).group() + '.fna')
    if os.path.exists(ename):
        logger.warning("Output file %s exists, not extracting", ename)
    else:
        # NOTE(review): extraction shells out to `gunzip`; Python's gzip
        # module would avoid the external dependency -- confirm before
        # changing. The bare `except:` has the same caveat as above.
        try:
            logger.info("Extracting archive %s to %s",
                        outfname, ename)
            with open(ename, 'w') as efh:
                subprocess.call(['gunzip', '-c', outfname],
                                stdout=efh)  # can be subprocess.run in Py3.5
            logger.info("Archive extracted to %s", ename)
        except:
            logger.error("Extracting archive %s failed", outfname)
            logger.error(last_exception())
            raise NCBIDownloadException()
    return ename
# Write contigs for a single assembly out to file
def write_contigs(asm_uid, contig_uids, batchsize=10000):
    """Writes assembly contigs out to a single FASTA file in the script's
    designated output directory.

    FASTA records are returned, as GenBank and even GenBankWithParts format
    records don't reliably give correct sequence in all cases.
    The script returns two strings for each assembly, a 'class' and a 'label'
    string - this is for use with, e.g. pyani.

    Output path is <outdir>/<AssemblyAccession>.fasta. Downloads are
    retried (outer loop) until the number of records returned matches
    len(contig_uids) or args.retries is exhausted.
    """
    # Has duplicate code with get_class_label_info() - needs refactoring
    logger.info("Collecting contig data for %s", asm_uid)
    # Assembly record - get binomial and strain names
    asm_record = Entrez.read(entrez_retry(Entrez.esummary, db='assembly',
                                          id=asm_uid, rettype='text'),
                             validate=False)
    asm_organism = asm_record['DocumentSummarySet']['DocumentSummary']\
        [0]['SpeciesName']
    try:
        asm_strain = asm_record['DocumentSummarySet']['DocumentSummary']\
            [0]['Biosource']['InfraspeciesList'][0]['Sub_value']
    # NOTE(review): get_ncbi_asm() catches (KeyError, IndexError) for the
    # same lookup; an empty InfraspeciesList would raise IndexError here
    # and escape -- confirm whether this narrower handler is intended.
    except KeyError:
        asm_strain = ""
    # Assembly UID (long form) for the output filename
    outfilename = "%s.fasta" % os.path.join(args.outdirname,
                                            asm_record['DocumentSummarySet']\
                                            ['DocumentSummary']\
                                            [0]['AssemblyAccession'])
    # Create label and class strings
    genus, species = asm_organism.split(' ', 1)
    # Get FASTA records for contigs
    logger.info("Downloading FASTA records for assembly %s (%s)",
                asm_uid, ' '.join([genus[0] + '.', species, asm_strain]))
    # We're doing an explicit outer retry loop here because we want to confirm
    # we have the correct data, as well as test for Entrez connection errors,
    # which is all the entrez_retry function does.
    tries, success = 0, False
    while not success and tries < args.retries:
        records = []  # Holds all return records
        # We may need to batch contigs
        query_uids = ','.join(contig_uids)
        try:
            for start in range(0, len(contig_uids), batchsize):
                logger.info("Batch: %d-%d", start, start+batchsize)
                records.extend(list(SeqIO.parse(entrez_retry(Entrez.efetch,
                                                             db='nucleotide',
                                                             id=query_uids,
                                                             rettype='fasta',
                                                             retmode='text',
                                                             retstart=start,
                                                             retmax=batchsize),
                                                'fasta')))
            tries += 1
            # Check only that correct number of records returned.
            if len(records) == len(contig_uids):
                success = True
            else:
                # NOTE(review): "try %d/20" hard-codes 20 instead of
                # args.retries -- the message can disagree with the
                # actual limit.
                logger.warning("%d contigs expected, %d contigs returned",
                               len(contig_uids), len(records))
                logger.warning("FASTA download for assembly %s failed",
                               asm_uid)
                logger.warning("try %d/20", tries)
            # Could also check expected assembly sequence length?
            logger.info("Downloaded genome size: %d",
                        sum([len(r) for r in records]))
        except:
            logger.warning("FASTA download for assembly %s failed", asm_uid)
            logger.warning(last_exception())
            logger.warning("try %d/20", tries)
    if not success:
        # Could place option on command-line to stop or continue here.
        # NOTE(review): on total failure the partial `records` list is
        # still written below -- confirm that writing an incomplete
        # FASTA is the intended "continue" behaviour.
        logger.error("Failed to download records for %s (continuing)",
                     asm_uid)
    # Write contigs to file
    retval = SeqIO.write(records, outfilename, 'fasta')
    logger.info("Wrote %d contigs to %s", retval, outfilename)
# Function to report whether an accession has been downloaded
def logreport_downloaded(accession, skippedlist, accessiondict, uidaccdict):
    """Log, for every known version of `accession`, whether its assembly
    was downloaded or ended up in the skipped list.
    """
    base_accession = accession.split('.')[0]
    for version_uid in accessiondict[base_accession]:
        outcome = ("NOT DOWNLOADED" if version_uid in skippedlist
                   else "DOWNLOADED")
        logger.warning("\t\t%s: %s - %s",
                       version_uid, uidaccdict[version_uid], outcome)
# Run as script
if __name__ == '__main__':
    # Parse command-line
    args = parse_cmdline()
    # Set up logging
    logger = logging.getLogger('genbank_get_genomes_by_taxon.py')
    logger.setLevel(logging.DEBUG)
    err_handler = logging.StreamHandler(sys.stderr)
    err_formatter = logging.Formatter('%(levelname)s: %(message)s')
    err_handler.setFormatter(err_formatter)
    # Was a logfile specified? If so, use it
    if args.logfile is not None:
        try:
            logstream = open(args.logfile, 'w')
            err_handler_file = logging.StreamHandler(logstream)
            err_handler_file.setFormatter(err_formatter)
            err_handler_file.setLevel(logging.INFO)
            logger.addHandler(err_handler_file)
        except:
            logger.error("Could not open %s for logging",
                         args.logfile)
            sys.exit(1)
    # Do we need verbosity?
    if args.verbose:
        err_handler.setLevel(logging.INFO)
    else:
        err_handler.setLevel(logging.WARNING)
    logger.addHandler(err_handler)
    # Report arguments, if verbose
    logger.info("genbank_get_genomes_by_taxon.py: %s", time.asctime())
    logger.info("command-line: %s", ' '.join(sys.argv))
    logger.info(args)
    # Have we got a contact email address? If not, exit.
    if args.email is None:
        logger.error("No email contact address provided (exiting)")
        sys.exit(1)
    set_ncbi_email()
    # Have we got an output directory? If not, exit.
    if args.outdirname is None:
        logger.error("No output directory name (exiting)")
        sys.exit(1)
    make_outdir()
    logger.info("Output directory: %s", args.outdirname)
    # We might have more than one taxon in a comma-separated list
    taxon_ids = args.taxon.split(',')
    logger.info("Passed taxon IDs: %s", ', '.join(taxon_ids))
    # Get all NCBI assemblies for each taxon UID
    asm_dict = defaultdict(set)
    for tid in taxon_ids:
        asm_dict[tid] = get_asm_uids(tid)
    for tid, asm_uids in list(asm_dict.items()):
        logger.info("Taxon %s: %d assemblies", tid, len(asm_uids))
    # Download contigs for each assembly UID
    classes, labels = [], []
    contig_dict = defaultdict(set)
    accessiondict = defaultdict(list)  # UIDs, keyed by accession
    uidaccdict = {}  # accessions, keyed by UID
    skippedlist = []
    for tid, asm_uids in list(asm_dict.items()):
        for uid in asm_uids:
            fastafilename, classtxt, labeltxt, accession = get_ncbi_asm(uid)
            # fastafilename is None if there was an error thrown
            if fastafilename is not None:
                contig_dict[uid] = fastafilename
            else:
                logger.error("Skipping download for %s", uid)
                skippedlist.append(uid)
            # Populate dictionaries for all attempted downloads
            classes.append(classtxt)
            labels.append(labeltxt)
            accessiondict[accession.split('.')[0]].append(uid)
            uidaccdict[uid] = accession
    # Write class and label files
    classfilename = os.path.join(args.outdirname, 'classes.txt')
    labelfilename = os.path.join(args.outdirname, 'labels.txt')
    logger.info("Writing classes file to %s", classfilename)
    with open(classfilename, 'w') as ofh:
        ofh.write('\n'.join(classes) + '\n')
    logger.info("Writing labels file to %s", labelfilename)
    with open(labelfilename, 'w') as ofh:
        ofh.write('\n'.join(labels) + '\n')
    # How many downloads did we do/have to skip?
    logger.info("Obtained %d assemblies", len(contig_dict))
    if len(skippedlist):
        logger.warning("Skipped %d downloads through error", len(skippedlist))
        for uid in sorted(skippedlist):
            logger.warning("Assembly UID %s skipped", uid)
            acc = uidaccdict[uid]
            logger.warning("\tUID: %s - accession: %s", uid, acc)
            # Has another version of this genome been successfully dl'ed
            logger.warning("\tAccession %s has versions:", acc.split('.')[0])
            logreport_downloaded(acc, skippedlist, accessiondict, uidaccdict)
            url = "http://www.ncbi.nlm.nih.gov/assembly/%s" % uid
            # Is this a GenBank sequence with no RefSeq counterpart?
            # e.g. http://www.ncbi.nlm.nih.gov/assembly/196191/
            if acc.startswith('GCA'):
                logger.warning("\tAccession is GenBank: does RefSeq exist?")
                logger.warning("\tCheck under 'history' at %s", url)
                # Check for RefSeq counterparts
                rsacc = re.sub('^GCA_', 'GCF_', uidaccdict[uid])
                logger.warning("\tAlternative RefSeq candidate accession: %s",
                               rsacc.split('.')[0])
                logger.warning("\tWere alternative assemblies downloaded?")
                logreport_downloaded(rsacc, skippedlist,
                                     accessiondict, uidaccdict)
            # Is this a suppressed RefSeq sequence?
            if acc.startswith('GCF'):
                logger.warning("\tAccession is RefSeq: is it suppressed?")
                logger.warning("\tCheck under 'history' at %s", url)
                # Check for GenBank counterparts
                gbacc = re.sub('^GCF_', 'GCA_', uidaccdict[uid])
                logger.warning("\tAlternative GenBank candidate accession: %s",
                               gbacc.split('.')[0])
                logger.warning("\tWere alternative assemblies downloaded?")
                logreport_downloaded(gbacc, skippedlist,
                                     accessiondict, uidaccdict)
    logger.info("Skipped assembly UIDs: %s", skippedlist)
    # Let the user know we're done
    logger.info(time.asctime())
    logger.info("Done.")
| 42.296355 | 79 | 0.607629 |
ace9a6bfecb2b7aeb1dcd156e85f7b29968d24ea | 6,075 | py | Python | sciva/helpers.py | ChengF-Lab/scIVA | f70a927531dd16236dff30decbe77f0552ad4f2d | [
"MIT"
] | null | null | null | sciva/helpers.py | ChengF-Lab/scIVA | f70a927531dd16236dff30decbe77f0552ad4f2d | [
"MIT"
] | null | null | null | sciva/helpers.py | ChengF-Lab/scIVA | f70a927531dd16236dff30decbe77f0552ad4f2d | [
"MIT"
] | 1 | 2022-01-30T17:55:44.000Z | 2022-01-30T17:55:44.000Z | import numpy as np
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
#plt.ioff()
import seaborn as sns
from pandas import DataFrame
from sklearn.metrics import normalized_mutual_info_score,adjusted_rand_score,homogeneity_score,completeness_score,silhouette_score
from sklearn.cluster import KMeans
from sklearn.cluster import SpectralClustering
from sklearn.decomposition import PCA
from sklearn.covariance import EllipticEnvelope
# Matplotlib marker-code -> marker-name lookup (mirrors matplotlib.markers).
markers = {',': 'pixel', 'o': 'circle','*': 'star', 'v': 'triangle_down',
           '^': 'triangle_up', '<': 'triangle_left', '>': 'triangle_right',
           '1': 'tri_down', '2': 'tri_up', '3': 'tri_left', '4': 'tri_right',
           '8': 'octagon', 's': 'square', 'p': 'pentagon',
           'h': 'hexagon1', 'H': 'hexagon2', '+': 'plus', 'x': 'x', '.': 'point',
           'D': 'diamond', 'd': 'thin_diamond', '|': 'vline', '_': 'hline',
           'P': 'plus_filled', 'X': 'x_filled', 0: 'tickleft',
           1: 'tickright', 2: 'tickup', 3: 'tickdown', 4: 'caretleft', 5: 'caretright',
           6: 'caretup', 7: 'caretdown', 8: 'caretleftbase', 9: 'caretrightbase', 10: 'caretupbase',
           11: 'caretdownbase', 'None': 'nothing', None: 'nothing', ' ': 'nothing', '': 'nothing'}
# First 20 marker codes; print_2D assigns one marker per class label index.
markers_keys = list(markers.keys())[:20]
# Global matplotlib font settings applied to every figure.
font = {'family' : 'normal',
        'weight' : 'bold',
        'size' : 22}
mpl.rc('font', **font)
sns.set_style("ticks")
# xkcd colour names used to build the categorical palette below
# (one colour per class label index, parallel to markers_keys).
colors = ["windows blue", "amber",
          "greyish", "faded green",
          "dusty purple","royal blue","lilac",
          "salmon","bright turquoise",
          "dark maroon","light tan",
          "orange","orchid",
          "sandy","topaz",
          "fuchsia","yellow",
          "crimson","cream"
          ]
current_palette = sns.xkcd_palette(colors)
def print_2D( points,label,id_map ):
    '''
    Scatter-plot 2-D embedded points, one colour/marker per class.

    points: N_samples * 2
    label: (int) N_samples
    id_map: map label id to its name

    Returns the matplotlib Figure.
    '''
    fig = plt.figure(figsize=(6,7))
    #current_palette = sns.color_palette("RdBu_r", max(label)+1)
    # Use smaller markers for large datasets to reduce overplotting.
    n_cell,_ = points.shape
    if n_cell > 500:
        s = 30
    else:
        s = 50
    # plt.xlim(round(points[:,0].min()*1.1),round(points[:,0].max()*1.1))
    # plt.ylim(round(points[:,1].min()*1.5),round(points[:,1].max()*1.1))
    # plt.xlim(-10, 25)
    # plt.ylim(-15, 25)
    ax = plt.subplot()
    ax.spines['bottom'].set_linewidth(2)
    ax.spines['left'].set_linewidth(2)
    ax.tick_params(direction='out', length=8, width=2,
                   grid_alpha=0.5)
    print( np.unique(label) )
    # One scatter call per class: palette colour and marker are indexed
    # by the integer class label.
    for i in np.unique(label):
        ax.scatter( points[label==i,0], points[label==i,1], c=current_palette[i], label=id_map[i], s=s,marker=markers_keys[i] )
    # Shrink the axes vertically (legacy layout that left room for a
    # bottom legend; the legend calls themselves are commented out).
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * 0.9])
    # ax.legend(scatterpoints=1,loc='upper center',
    #           bbox_to_anchor=(0.5,-0.1),ncol=6,
    #           fancybox=True,
    #           prop={'size':10}
    #           )
    # ax.legend(scatterpoints=1, loc='upper center',
    #           bbox_to_anchor=(0.5, -0.1), ncol=5,
    #           frameon=False,
    #           prop={'size': 10}
    #           )
    # ax.legend(scatterpoints=1, loc=3,
    #           bbox_to_anchor=(0.86, 0.4), ncol=1,
    #           frameon=False,
    #           prop={'size': 18}
    #           )
    sns.despine()
    return fig
def print_heatmap( points,label,id_map ):
    '''
    Cluster the samples (rows only) and draw a clustermap with a
    per-class row-colour strip.

    points: N_samples * N_features
    label: (int) N_samples
    id_map: map label id to its name

    Returns the matplotlib Figure of the clustermap.
    '''
    # = sns.color_palette("RdBu_r", max(label)+1)
    #cNorm = colors.Normalize(vmin=0,vmax=max(label)) #normalise the colormap
    #scalarMap = cm.ScalarMappable(norm=cNorm,cmap='Paired') #map numbers to colors
    # Rows are indexed by human-readable class names for display.
    index = [id_map[i] for i in label]
    df = DataFrame(
        points,
        columns = list(range(points.shape[1])),
        index = index
    )
    # Row-colour strip: palette colour per integer class label.
    row_color = [current_palette[i] for i in label]
    cmap = sns.cubehelix_palette(as_cmap=True, rot=-.3, light=1)
    g = sns.clustermap( df,cmap=cmap,row_colors=row_color,col_cluster=False,xticklabels=False,yticklabels=False) #,standard_scale=1 )
    return g.fig
def measure(predicted, true):
    """Compute external clustering metrics comparing predicted vs. true labels.

    Each score (NMI, adjusted Rand, homogeneity, completeness) is printed
    as it is reported, and all four are returned in a dict keyed by
    'NMI', 'RAND', 'HOMOGENEITY' and 'COMPLETENESS'.
    """
    scores = {
        'NMI': normalized_mutual_info_score(true, predicted),
        'RAND': adjusted_rand_score(true, predicted),
        'HOMOGENEITY': homogeneity_score(true, predicted),
        'COMPLETENESS': completeness_score(true, predicted),
    }
    print("NMI:" + str(scores['NMI']))
    print("RAND:" + str(scores['RAND']))
    print("HOMOGENEITY:" + str(scores['HOMOGENEITY']))
    print("COMPLETENESS:" + str(scores['COMPLETENESS']))
    return scores
def clustering(points, k=2, name='kmeans'):
    '''
    Cluster `points` (N_samples * N_features) into k groups.

    name selects the algorithm: 'kmeans' (KMeans, 100 restarts) or
    'spec' (spectral clustering with cosine affinity). Returns a tuple
    (labels, silhouette score); an unrecognised name silently yields
    None, matching the historical behaviour.
    '''
    if name == 'kmeans':
        model = KMeans(n_clusters=k, n_init=100).fit(points)
        labels = model.labels_
        # Silhouette is undefined when only one cluster is found;
        # report 0 in that degenerate case.
        if len(np.unique(labels)) > 1:
            si = silhouette_score(points, labels)
        else:
            si = 0
        print("Silhouette:" + str(si))
        return labels, si
    if name == 'spec':
        model = SpectralClustering(n_clusters=k, affinity='cosine').fit(points)
        si = silhouette_score(points, model.labels_)
        print("Silhouette:" + str(si))
        return model.labels_, si
def cart2polar(points):
    '''
    Return [magnitude, phase-angle] columns for `points`.

    Uses np.abs/np.angle, i.e. the input is interpreted element-wise as
    complex values; for an array of shape (N,) the result has shape
    (N, 2). NOTE(review): for a real (N, 2) array np.angle is 0/pi
    per element and the result is (N, 4) -- confirm callers pass
    complex data.
    '''
    magnitudes = np.abs(points)
    angles = np.angle(points)
    return np.column_stack((magnitudes, angles))
def outliers_detection(expr):
    """Flag outlier samples in `expr` (+1 = inlier, -1 = outlier).

    Samples are first projected onto 2 PCA components, then scored with
    an elliptic-envelope robust covariance estimate.
    """
    projected = PCA(n_components=2).fit_transform(expr)
    envelope = EllipticEnvelope().fit(projected)
    return envelope.predict(projected)
| 32.66129 | 133 | 0.573004 |
ace9a76144adcd6582facf87da43c91f205db3b0 | 1,695 | py | Python | mars/deploy/yarn/worker.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 1 | 2022-02-02T03:03:48.000Z | 2022-02-02T03:03:48.000Z | mars/deploy/yarn/worker.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | mars/deploy/yarn/worker.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ..oscar.worker import WorkerCommandRunner
from .config import MarsWorkerConfig
from .core import YarnServiceMixin
class YarnWorkerCommandRunner(YarnServiceMixin, WorkerCommandRunner):
    """Worker command runner for Mars deployments on YARN.

    Combines the generic oscar WorkerCommandRunner with YARN-specific
    behaviour (endpoint registration, container IP discovery) from
    YarnServiceMixin.
    """
    # Registers this runner under the YARN worker service name.
    service_name = MarsWorkerConfig.service_name
    def __call__(self, *args, **kwargs):
        # Expose the container's IP to the rest of the process before
        # the base runner starts its services.
        os.environ["MARS_CONTAINER_IP"] = self.get_container_ip()
        return super().__call__(*args, **kwargs)
    async def start_services(self):
        """Start the worker services and mark the node ready.

        The worker is started with mark_ready=False so readiness is
        only announced after all supervisors are reachable.
        """
        # Imported lazily to avoid import cycles at module load time.
        from ..oscar.worker import start_worker
        from ...services.cluster import ClusterAPI
        self.register_endpoint()
        await start_worker(
            self.pool.external_address,
            self.args.supervisors,
            self.band_to_slot,
            list(self.args.load_modules),
            self.config,
            mark_ready=False,
        )
        await self.wait_all_supervisors_ready()
        # Only now announce this node as ready to the cluster.
        cluster_api = await ClusterAPI.create(self.args.endpoint)
        await cluster_api.mark_node_ready()
# Module-level entry point: YARN invokes this module directly, and other
# code may import `main` as a callable.
main = YarnWorkerCommandRunner()
if __name__ == "__main__": # pragma: no branch
    main()
| 31.388889 | 74 | 0.705015 |
ace9a77d3c29fcce295e4465b49977b1d4999ceb | 8,194 | py | Python | run_analysis.py | asmyoo/MSAP | 0ed89f90d67260892a8c4d945504f3b0a2096d36 | [
"MIT"
] | 1 | 2022-03-05T01:34:55.000Z | 2022-03-05T01:34:55.000Z | run_analysis.py | asmyoo/MSAP | 0ed89f90d67260892a8c4d945504f3b0a2096d36 | [
"MIT"
] | null | null | null | run_analysis.py | asmyoo/MSAP | 0ed89f90d67260892a8c4d945504f3b0a2096d36 | [
"MIT"
] | 1 | 2022-03-16T00:33:25.000Z | 2022-03-16T00:33:25.000Z | # -*- coding: utf-8 -*-
"""Model evaluation script.
Authors:
Fangzhou Li - fzli@ucdavis.edu
"""
import os
import pickle
import logging
import numpy as np
import pandas as pd
import click
from msap.modeling.model_evaluation.statistics import (
get_embedded_data,
get_selected_features,
get_curve_metrics,
get_training_statistics,
get_similarity_matrix)
from msap.explanatory_analysis import get_pairwise_correlation
from msap.utils import (
ClassifierHandler,
load_X_and_y,
KFold_by_feature)
from msap.utils.plot import (
plot_heatmap,
plot_embedded_scatter,
plot_rfe_line,
plot_curves,
plot_confusion_matrix)
# Module logger; INFO-level output by default.
logger = logging.getLogger(__file__)
logging.basicConfig(
    level=logging.INFO)
# Pairwise-correlation methods evaluated by main().
METHODS_PC = ['pearson', 'spearman', 'kendall']
# Low-dimensional embedding methods for scatter plots.
METHODS_EMBEDDING = ['tsne', 'pca']
# Curve types (precision-recall, ROC) plotted for the best model.
METHODS_CURVE = ['pr', 'roc']
# Classifier identifiers recognised in model-selection results; order
# here determines the order of best_candidate_per_clf.
CLASSIFIER_MODES = [
    'decisiontreeclassifier',
    'gaussiannb',
    'multinomialnb',
    'svc',
    'adaboostclassifier',
    'randomforestclassifier',
    'mlpclassifier']
def parse_model_selection_result(ms_result: tuple) -> list:
    """Parse the model selection result tuple and get the best models.

    Args:
        ms_result: Model selection result tuple.

    Returns:
        List of best model and statistics for each classifiers, one entry
        per name in CLASSIFIER_MODES (ranked by mean F1 across CV splits).
    """
    raw_candidates, _ = ms_result
    scored = []
    for idx, combo, cv in raw_candidates:
        best = cv['best']
        # Average F1 over the CV splits; NaNs count as 0. The `best`
        # dict holds one entry per split plus a trailing 'param' key,
        # hence len(best) - 1 splits.
        split_f1s = np.nan_to_num(
            [best[f'split_{j}']['f1'] for j in range(len(best) - 1)])
        scored.append(((idx, combo, best), np.mean(split_f1s)))
    scored.sort(key=lambda item: item[1], reverse=True)
    best_candidate_per_clf = []
    for clf in CLASSIFIER_MODES:
        # scored is sorted best-first, so the first match per classifier
        # is its top candidate.
        for (idx, combo, best), mean_f1 in scored:
            if combo[3] != clf:
                continue
            if best['param'] is not None:
                # Strip pipeline prefixes ("step__param" -> "param").
                best['param'] = {key.split('__')[-1]: value
                                 for key, value in best['param'].items()}
            best_candidate_per_clf.append(((idx, combo, best), mean_f1))
            break
    return best_candidate_per_clf
@click.command()
@click.argument(
    'path-input-model-selection-result',
    type=click.Path(exists=True))
@click.argument(
    'path-input-preprocessed-data-dir',
    type=click.Path(exists=True))
@click.argument(
    'path-input-data-raw',
    type=click.Path(exists=True))
@click.argument(
    'path-output-dir',
    type=str)
@click.argument(
    'feature-label',
    type=str)
@click.option(
    '--feature-kfold',
    type=str,
    default=None)
@click.option(
    '--random-state',
    type=int,
    default=42)
def main(
        path_input_model_selection_result,
        path_input_preprocessed_data_dir,
        path_input_data_raw,
        path_output_dir,
        feature_label,
        feature_kfold,
        random_state):
    """Run the MSAP model-evaluation pipeline and write all artifacts.

    Loads the pickled model-selection result, picks the best candidate
    per classifier (and overall), reloads the matching preprocessed
    dataset, and writes to `path_output_dir`: best-classifier table,
    pairwise-correlation heatmaps/rankings, a similarity matrix,
    t-SNE/PCA scatter plots, RFE results, PR/ROC curves, and confusion
    matrices for validation and training.
    """
    if not os.path.exists(path_output_dir):
        os.mkdir(path_output_dir)
    model_selection_result = None
    with open(path_input_model_selection_result, 'rb') as f:
        model_selection_result = pickle.load(f)
    best_candidate_per_clf = parse_model_selection_result(
        model_selection_result)
    # Overall best candidate = highest mean F1 across classifiers.
    best_candidate = max(best_candidate_per_clf, key=lambda x: x[1])
    _, best_combination, best_cv_result = best_candidate[0]
    best_scale_mode, best_impute_mode, best_outlier_mode, best_clf \
        = best_combination
    pd.DataFrame(best_candidate_per_clf).to_csv(
        f"{path_output_dir}/best_clfs.csv")
    # X_raw, _ = load_X_and_y(path_input_data_raw, col_y=feature_label)
    # Reload the preprocessed dataset matching the winning preprocessing
    # combination (scaling/imputation/outlier-handling modes).
    X, y = load_X_and_y(
        f"{path_input_preprocessed_data_dir}/"
        f"{best_scale_mode}_{best_impute_mode}_{best_outlier_mode}.csv",
        col_y=feature_label)
    # idxes_outlier = np.loadtxt(
    #     f"{path_input_preprocessed_data_dir}/"
    #     f"{best_scale_mode}_{best_impute_mode}_{best_outlier_mode}"
    #     "_outlier_indices.txt",
    #     delimiter='\n',
    #     dtype=int)
    splits = KFold_by_feature(
        X=X,
        n_splits=5,
        feature=feature_kfold,
        random_state=random_state)
    X = X.drop([feature_kfold], axis=1)
    clf = ClassifierHandler(
        classifier_mode=best_clf,
        params=best_cv_result['param'],
        random_state=random_state).clf
    # Plot pairwise correlation heatmaps.
    for method in METHODS_PC:
        corr, pval = get_pairwise_correlation(
            X, y, method=method)
        # Rank features by |correlation| with the label.
        y_corr = corr[feature_label].drop([feature_label])
        y_pval = pval[feature_label].drop([feature_label])
        idxes_rank = y_corr.abs().argsort().tolist()[::-1]
        rank = pd.concat(
            [y_corr[idxes_rank], y_pval[idxes_rank]],
            axis=1)
        rank.columns = ['corr', 'p-value']
        rank.to_csv(f"{path_output_dir}/pc_rank_{method}.csv")
        plot_heatmap(
            corr,
            title=f"Pairwise {method.capitalize()} Correlation",
            path_save=f"{path_output_dir}/pc_{method}.png")
    # Plot similarity matrix for the data points heatmap.
    sm = get_similarity_matrix(X, y)
    plot_heatmap(
        sm,
        title=f"Similarity Matrix",
        cmap='Greys',
        path_save=f"{path_output_dir}/sim.png")
    # Plot embedded data points.
    y_scatter = y.map({1.0: 'Success', 0.0: 'Fail'})
    y_scatter.name = 'Translation'
    for method in METHODS_EMBEDDING:
        X_embedded = pd.DataFrame(
            get_embedded_data(
                X,
                method=method, random_state=random_state))
        X_embedded.columns = ['First Dimension', 'Second Dimension']
        plot_embedded_scatter(
            X_embedded,
            y_scatter,
            title=f"{method.upper()}",
            path_save=f"{path_output_dir}/embed_{method}.png")
    # Calculate and plot feature selection for the best model.
    sfs = get_selected_features(clf, X, y, splits)
    plot_rfe_line(
        sfs,
        title="Recursive Feature Elimination",
        path_save=f"{path_output_dir}/rfe.png")
    pd.DataFrame(sfs.get_metric_dict()).transpose().reset_index().to_csv(
        f"{path_output_dir}/rfe_result.csv", index=False)
    # Calculate and plot curves, all classifiers and the best model.
    for method in METHODS_CURVE:
        try:
            curve_metrics = get_curve_metrics(
                clf, X, y, method, splits)
        except Exception as e:
            # Curves are skipped (not fatal) when a fold lacks both classes.
            logger.info(
                f"{method} skipped due to data inbalance. Error Type: "
                f"{type(e)}. Error message: {e}")
            continue
        plot_curves(
            curve_metrics,
            method=method,
            path_save=f"{path_output_dir}/{method}.png")
    # # Plot outliers.
    # y_in_out = ['Inlier' for _ in range(len(X_raw))]
    # for idx in idxes_outlier:
    #     y_in_out[idx] = 'Outlier'
    # y_in_out = pd.Series(y_in_out)
    # y_in_out.name = 'Inlier/Outlier'
    # for method in METHODS_EMBEDDING:
    #     X_raw_embedded = pd.DataFrame(
    #         get_embedded_data(
    #             X_raw.drop([feature_kfold], axis=1),
    #             method=method,
    #             random_state=random_state))
    #     X_raw_embedded.columns = ['First Dimension', 'Second Dimension']
    #     plot_embedded_scatter(
    #         X_raw_embedded,
    #         y_in_out,
    #         title=f"Outlier Detection with {method.upper()}",
    #         path_save=f"{path_output_dir}/outliers_{method}.png")
    # Plot confusion matrix with various metrics for validation.
    del best_cv_result['param']
    plot_confusion_matrix(
        cv_result=best_cv_result,
        axis_labels=['Success', 'Failure'],
        path_save=f"{path_output_dir}/cm.png")
    # Plot confusion matrix with various metrics for training.
    best_cv_result_train = get_training_statistics(
        clf, X, y, splits)
    plot_confusion_matrix(
        cv_result=best_cv_result_train,
        axis_labels=['Success', 'Failure'],
        path_save=f"{path_output_dir}/cm_train.png")
if __name__ == '__main__':
    main()
| 30.804511 | 77 | 0.634611 |
ace9a7f19b1d1278c00d4e02e3d2746e6b99ad43 | 15,781 | py | Python | milk/supervised/svm.py | aflaxman/milk | 252806fd081dc1b3c7fe34b14f9e7a4b646e0b49 | [
"MIT"
] | 1 | 2015-01-19T22:41:40.000Z | 2015-01-19T22:41:40.000Z | milk/supervised/svm.py | aflaxman/milk | 252806fd081dc1b3c7fe34b14f9e7a4b646e0b49 | [
"MIT"
] | null | null | null | milk/supervised/svm.py | aflaxman/milk | 252806fd081dc1b3c7fe34b14f9e7a4b646e0b49 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2012, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
from .classifier import normaliselabels, ctransforms_model
from .base import supervised_model
from collections import deque
import numpy
import numpy as np
import random
from . import _svm
__all__ = [
'rbf_kernel',
'polynomial_kernel',
'precomputed_kernel',
'dot_kernel',
'svm_raw',
'svm_binary',
'svm_to_binary',
'svm_sigmoidal_correction',
'sigma_value_fisher',
'fisher_tuned_rbf_svm',
]
def _svm_apply(SVM, q):
'''
f_i = _svm_apply(SVM, q)
@internal: This is mostly used for testing
'''
X,Y,Alphas,b,C,kernel=SVM
N = len(X)
s = 0.0
for i in xrange(N):
s += Alphas[i] * Y[i] * kernel(q, X[i])
return s - b
def svm_learn_smo(X,Y,kernel,C,eps=1e-4,tol=1e-2,cache_size=(1<<20)):
    '''
    Learn a svm classifier

    X: data
    Y: labels in SVM format (ie Y[i] in (1,-1))

    This is a very raw interface. In general, you should use a class
    like svm_classifier.

    Implements the Sequential Minimum Optimisation Algorithm from Platt's
    "Fast training of support vector machines using sequential minimal optimization"
    in Advances in kernel methods: support vector learning
    Pages: 185 - 208
    Year of Publication: 1999
    ISBN:0-262-19416-3
    '''
    assert numpy.all(numpy.abs(Y) == 1)
    assert len(X) == len(Y)
    N = len(Y)
    Y = Y.astype(numpy.int32)
    # params appears to use the layout [b, C, eps, tol] (see
    # svm_learn_libsvm below); the optimiser writes the bias into params[0].
    # NOTE(review): the `eps` and `tol` *arguments* are ignored here -- the
    # hard-coded 1e-3/1e-5 are passed instead. Confirm whether intentional.
    params = numpy.array([0,C,1e-3,1e-5],numpy.double)
    Alphas0 = numpy.zeros(N, numpy.double)
    # The C extension fills Alphas0 (and params) in place.
    _svm.eval_SMO(X,Y,Alphas0,params,kernel,cache_size)
    return Alphas0, params[0]
def svm_learn_libsvm(features, labels, kernel, C, eps=1e-4, tol=1e-2, cache_size=(1<<20), alphas=None):
    '''
    Learn a svm classifier using LIBSVM optimiser

    This is a very raw interface. In general, you should use a class
    like svm_classifier.

    This uses the LIBSVM optimisation algorithm

    Parameters
    ----------
    X : ndarray
        data
    Y : ndarray
        labels in SVM format (ie Y[i] in (1,-1))
    kernel : kernel
    C : float
    eps : float, optional
    tol : float, optional
    cache_size : int, optional
    alphas : ndarray, optional
        warm-start coefficients; filled in place by the optimiser

    Returns
    -------
    alphas : ndarray
    b : float
    '''
    if not np.all(np.abs(labels) == 1):
        raise ValueError('milk.supervised.svm.svm_learn_libsvm: Y[i] != (-1,+1)')
    assert len(features) == len(labels)
    n = len(labels)
    labels = labels.astype(np.int32)
    # Linear term of the dual objective (all -1 for the standard C-SVM dual).
    p = -np.ones(n, np.double)
    # params layout: [b, C, eps, tol]; the optimiser stores the bias in
    # params[0] (returned below).
    params = np.array([0,C,eps,tol], dtype=np.double)
    if alphas is None:
        alphas = np.zeros(n, np.double)
    elif alphas.dtype != np.double or len(alphas) != n:
        raise ValueError('milk.supervised.svm_learn_libsvm: alphas is in wrong format')
    # The C extension fills `alphas` and `params` in place.
    _svm.eval_LIBSVM(features, labels, alphas, p, params, kernel, cache_size)
    return alphas, params[0]
class preprocessed_rbf_kernel(object):
    '''RBF kernel pre-bound to a fixed set of support vectors.

    Caches the support vectors and their squared norms once, so that each
    later evaluation only needs one dot product per support vector.
    '''

    def __init__(self, X, sigma, beta):
        self.X = X
        self.Xsum = (X ** 2).sum(1)
        self.sigma = sigma
        self.beta = beta

    def call_many(self, qs):
        # Vectorised evaluation against many query points at once, using
        # milk's pairwise squared-euclidean distance helper.
        from milk.unsupervised import pdist
        values = pdist(self.X, qs, 'euclidean2')
        values /= -self.sigma
        np.exp(values, values)
        values *= self.beta
        return values.T

    def __call__(self, q):
        # Uses -||x_i - q||^2 = 2<x_i, q> - ||x_i||^2 - ||q||^2, evaluated
        # in place on the dot-product vector.
        scaled = 2. * np.dot(self.X, q)
        scaled -= self.Xsum
        scaled -= np.dot(q, q)
        scaled /= self.sigma
        return self.beta * np.exp(scaled)
class rbf_kernel(object):
    '''
    kernel = rbf_kernel(sigma, beta=1)

    Radial Basis Function kernel; evaluates

        beta * exp( - ||x1 - x2||^2 / sigma )

    (note: the *squared* euclidean distance is used).
    '''

    def __init__(self, sigma, beta=1):
        self.sigma = sigma
        self.beta = beta
        # Numeric kernel id and argument consumed by the C optimiser.
        self.kernel_nr_ = 0
        self.kernel_arg_ = float(sigma)

    def __call__(self, x1, x2):
        diff = x1 - x2
        squared_dist = np.dot(diff, diff)
        return self.beta * np.exp(-squared_dist / self.sigma)

    def preprocess(self, X):
        # Bind the kernel to a fixed support-vector set for fast evaluation.
        return preprocessed_rbf_kernel(X, self.sigma, self.beta)
class polynomial_kernel(object):
    '''
    kernel = polynomial_kernel(d, c=1)

    Polynomial kernel; evaluates (<x1, x2> + c)**d
    '''

    def __init__(self, d, c=1):
        self.d = d
        self.c = c

    def __call__(self, x1, x2):
        base = np.dot(x1, x2) + self.c
        return base ** self.d
class precomputed_kernel(object):
    '''
    kernel = precomputed_kernel(kmatrix)

    A "fake" kernel which looks values up in a precomputed kernel matrix:
    calling it with a pair of *indices* ``(i, j)`` returns ``kmatrix[i, j]``.
    '''
    def __init__(self, kmatrix, copy=False):
        # BUG FIX: the converted matrix was previously assigned to a local
        # and dropped, so __call__ raised NameError on `kmatrix`. It is now
        # stored on the instance. Also, np.ascontiguousarray() takes no
        # `copy` keyword -- an explicit .copy() implements the old intent.
        kmatrix = np.ascontiguousarray(kmatrix, np.double)
        if copy:
            kmatrix = kmatrix.copy()
        self.kmatrix = kmatrix
        # Numeric kernel id and argument consumed by the C optimiser.
        self.kernel_nr_ = 1
        self.kernel_arg_ = 0.

    def __call__(self, x0, x1):
        return self.kmatrix[x0, x1]
class _call_kernel(object):
def __init__(self, k, svs):
self.svs = svs
self.kernel = k
def __call__(self, q):
return np.array([self.kernel(s, q) for s in self.svs])
class preprocessed_dot_kernel(object):
    '''Linear (dot-product) kernel pre-bound to a set of support vectors.'''

    def __init__(self, svs):
        self.svs = svs

    def __call__(self, x1):
        # A single matrix-vector product yields all kernel values at once.
        return np.dot(self.svs, x1)
class dot_kernel(object):
    '''Linear kernel: k(x0, x1) = <x0, x1>.'''

    def __init__(self):
        # Numeric kernel id and argument consumed by the C optimiser.
        self.kernel_nr_ = 2
        self.kernel_arg_ = 0.

    def __call__(self, x0, x1):
        return np.dot(x0, x1)

    def preprocess(self, svs):
        # Bind the kernel to a fixed support-vector set for fast evaluation.
        return preprocessed_dot_kernel(svs)
class svm_raw_model(supervised_model):
    '''
    Model learned by `svm_raw`.

    Holds the support vectors `svs`, the per-support-vector weights
    `Yw` (alpha_i * y_i), the bias `b` and the training kernel. The decision
    value for a query q is  sum_i Yw[i] * k(svs[i], q) - b.
    '''
    def __init__(self, svs, Yw, b, kernel):
        self.svs = svs
        self.Yw = Yw
        self.b = b
        self.kernel = kernel
        # Kernels may expose an optimised `preprocess`; otherwise fall back
        # to a generic per-support-vector evaluator.
        try:
            self.kernelfunction = self.kernel.preprocess(self.svs)
        except AttributeError:
            self.kernelfunction = _call_kernel(self.kernel, self.svs)

    def apply_many(self, qs):
        '''Return decision values for a sequence of query points.'''
        try:
            qs = self.kernelfunction.call_many(qs)
        except AttributeError:
            # BUG FIX: `np.array(map(...))` produces a useless 0-d object
            # array on Python 3 (map returns an iterator). A list
            # comprehension materialises the values and also works on
            # Python 2.
            qs = np.array([self.kernelfunction(q) for q in qs])
        return np.dot(qs, self.Yw) - self.b

    def apply(self, q):
        '''Return the decision value for a single query point.'''
        Q = self.kernelfunction(q)
        return np.dot(Q, self.Yw) - self.b
class svm_raw(object):
    '''
    svm_raw: classifier

    classifier = svm_raw(kernel, C, eps=1e-3, tol=1e-8)

    Trains a raw (real-valued output, uncorrected) binary SVM.

    Parameters
    ----------
    kernel : callable
        the kernel to use. This should be a function that takes two data
        arguments see rbf_kernel and polynomial_kernel.
    C : float
        the C parameter
    eps : float, optional
        the precision to which to solve the problem (default 1e-3)
    tol : float, optional
        (|x| < tol) is considered zero
    '''
    def __init__(self, kernel=None, C=1., eps=1e-3, tol=1e-8):
        self.C = C
        self.kernel = kernel
        self.eps = eps
        self.tol = tol
        # Optimiser backend; may be switched to 'smo' via set_option().
        self.algorithm = 'libsvm'

    def train(self, features, labels, normalisedlabels=False, **kwargs):
        assert self.kernel is not None, 'milk.supervised.svm_raw.train: kernel not set!'
        assert self.algorithm in ('libsvm','smo'), 'milk.supervised.svm_raw: unknown algorithm (%s)' % self.algorithm
        assert not (np.isinf(self.C) or np.isnan(self.C)), 'milk.supervised.svm_raw: setting C to NaN or Inf causes problems.'
        features = np.asanyarray(features)
        if normalisedlabels:
            Y = labels.copy()
        else:
            Y,_ = normaliselabels(labels)
        assert Y.max() == 1, 'milk.supervised.svm_raw can only handle binary problems'
        # Map {0, 1} labels to the {-1, +1} convention of the optimisers.
        Y *= 2
        Y -= 1
        kernel = self.kernel
        try:
            # C-implemented kernels are passed to the optimiser by numeric
            # id so the inner loop can avoid Python callbacks; pure-Python
            # kernels (no kernel_nr_) are passed as-is.
            kernel = (self.kernel.kernel_nr_, self.kernel.kernel_arg_)
            features = np.ascontiguousarray(features, np.double)
        except AttributeError:
            pass
        if self.algorithm == 'smo':
            alphas,b = svm_learn_smo(features,Y,kernel,self.C,self.eps,self.tol)
        else:
            alphas,b = svm_learn_libsvm(features,Y,kernel,self.C,self.eps,self.tol)
        # Keep only the support vectors (non-zero alpha) in the model.
        svsi = (alphas != 0)
        svs = features[svsi]
        w = alphas[svsi]
        Y = Y[svsi]
        Yw = w * Y
        return svm_raw_model(svs, Yw, b, self.kernel)

    def get_params(self):
        # (C, eps, tol); the kernel is deliberately not part of the params.
        return self.C, self.eps,self.tol

    def set_params(self,params):
        self.C,self.eps,self.tol = params

    def set_option(self, optname, value):
        setattr(self, optname, value)
def learn_sigmoid_constants(F,Y,
max_iters=None,
min_step=1e-10,
sigma=1e-12,
eps=1e-5):
'''
A,B = learn_sigmoid_constants(F,Y)
This is a very low-level interface look into the svm_classifier class.
Parameters
----------
F : Values of the function F
Y : Labels (in boolean format, ie, in (0,1))
Other Parameters
----------------
max_iters : Maximum nr. of iterations
min_step : Minimum step
sigma : sigma
eps : A small number
Reference for Implementation
----------------------------
Implements the algorithm from "A Note on Platt's Probabilistic Outputs for
Support Vector Machines" by Lin, Lin, and Weng.
Machine Learning, Vol. 68, No. 3. (23 October 2007), pp. 267-276
'''
# Below we use safe constructs to avoid using the overflown values, but we
# must compute them because of the way numpy works.
errorstate = np.seterr(over='ignore')
# the deci[i] array is called F in this code
F = np.asanyarray(F)
Y = np.asanyarray(Y)
assert len(F) == len(Y)
assert numpy.all( (Y == 1) | (Y == 0) )
if max_iters is None:
max_iters = 1000
prior1 = Y.sum()
prior0 = len(F)-prior1
small_nr = 1e-4
hi_t = (prior1+1.)/(prior1+2.)
lo_t = 1./(prior0+2.)
T = Y*hi_t + (1-Y)*lo_t
A = 0.
B = np.log( (prior0+1.)/(prior1+1.) )
def target(A,B):
fApB = F*A + B
lef = np.log1p(np.exp(fApB))
lemf = np.log1p(np.exp(-fApB))
fvals = np.choose(fApB >= 0, ( T*fApB + lemf, (T-1.)*fApB + lef))
return np.sum(fvals)
fval = target(A,B)
for iter in xrange(max_iters):
fApB = F*A + B
ef = np.exp(fApB)
emf = np.exp(-fApB)
p = np.choose(fApB >= 0, ( emf/(1.+emf), 1./(1.+ef) ))
q = np.choose(fApB >= 0, ( 1/(1.+emf), ef/(1.+ef) ))
d2 = p * q
h11 = np.dot(F*F,d2) + sigma
h22 = np.sum(d2) + sigma
h21 = np.dot(F,d2)
d1 = T - p
g1 = np.dot(F,d1)
g2 = np.sum(d1)
if abs(g1) < eps and abs(g2) < eps: # Stopping criteria
break
det = h11*h22 - h21*h21
dA = - (h22*g1 - h21*g2)/det
dB = - (h21*g1 + h11*g2)/det
gd = g1*dA + g2*dB
stepsize = 1.
while stepsize >= min_step:
newA = A + stepsize*dA
newB = B + stepsize*dB
newf = target(newA,newB)
if newf < fval+eps*stepsize*gd:
A = newA
B = newB
fval = newf
break
stepsize /= 2
else:
print 'Line search fails'
break
np.seterr(**errorstate)
return A,B
class svm_binary_model(supervised_model):
    '''Maps a raw SVM decision value onto one of the two stored labels.'''

    def __init__(self, classes):
        self.classes = classes

    def apply(self, f):
        # Non-negative decision values map to the second class.
        if f >= 0.:
            return self.classes[1]
        return self.classes[0]
class svm_binary(object):
    '''
    classifier = svm_binary()

    model = classifier.train(features, labels)
    assert model.apply(f) in labels

    Learns nothing but the two class names, so raw SVM outputs can be
    mapped back to the original labels.
    '''

    def train(self, features, labels, normalisedlabels=False, **kwargs):
        if normalisedlabels:
            # Labels are already in canonical 0/1 form.
            return svm_binary_model((0, 1))
        assert len(labels) >= 2, 'Cannot train from a single example'
        classnames = sorted(set(labels))
        assert len(classnames) == 2, 'milk.supervised.svm.svm_binary.train: Can only handle two class problems'
        return svm_binary_model(classnames)
class svm_to_binary(object):
    '''
    svm_to_binary(base_svm)

    A simple wrapper so that

    svm_to_binary(base_svm)

    is a model that takes the base_svm classifier and then binarises its model output.

    NOTE: This class does the same job as::

        ctransforms(base_svm, svm_binary())
    '''

    def __init__(self, svm_base):
        '''
        binclassifier = svm_to_binary(svm_base)

        a classifier that binarises the output of svm_base.
        '''
        self.base = svm_base

    def train(self, features, labels, **kwargs):
        # Train the raw SVM and a label mapper, then chain them.
        raw_model = self.base.train(features, labels, **kwargs)
        label_model = svm_binary().train(features, labels, **kwargs)
        return ctransforms_model([raw_model, label_model])

    def set_option(self, opt, value):
        # Options are forwarded to the wrapped classifier.
        self.base.set_option(opt, value)
class svm_sigmoidal_correction_model(supervised_model):
    '''Applies a fitted Platt sigmoid, mapping decision values into (0, 1).'''

    def __init__(self, A, B):
        self.A = A
        self.B = B

    def apply(self, features):
        # P(y=1 | f) = 1 / (1 + exp(A*f + B))
        return 1. / (1. + np.exp(features * self.A + self.B))
class svm_sigmoidal_correction(object):
    '''
    svm_sigmoidal_correction : a classifier

    Sigmoidal (Platt-scaling) approximation for obtaining a probability
    estimate out of the output of an SVM.
    '''

    def __init__(self):
        self.max_iters = None

    def train(self, features, labels, **kwargs):
        # Fit the sigmoid parameters on the raw decision values.
        A, B = learn_sigmoid_constants(features, labels, self.max_iters)
        return svm_sigmoidal_correction_model(A, B)

    def get_params(self):
        return self.max_iters

    def set_params(self, params):
        self.max_iters = params
def sigma_value_fisher(features, labels):
    '''
    f = sigma_value_fisher(features,labels)
    value_s = f(s)

    Build a function that scores how good a given RBF width `sigma` is for
    `features`: the returned function should be *minimised* over sigma.

    Parameters
    ----------
    features : features matrix as 2-ndarray.
    labels : 1-d array of 0/1 class labels.

    Returns
    -------
    f : a function: float -> float
        this function should be minimised for a good `sigma`

    Reference
    ---------
    Implements the measure in
    "Determination of the spread parameter in the
    Gaussian kernel for classification and regression"
    by Wenjian Wanga, Zongben Xua, Weizhen Luc, and Xiaoyun Zhanga
    '''
    features = np.asanyarray(features)
    # Pairwise squared euclidean distances: ||a||^2 - 2<a,b> + ||b||^2.
    gram = np.dot(features, features.T)
    sqnorms = np.sum(features ** 2, 1)
    dists = sqnorms - 2 * gram
    dists = dists.T + sqnorms
    # Negated within-class and between-class distance blocks; copied so the
    # closure below does not keep the full distance matrix alive.
    C1 = (-dists[labels == 0][:, labels == 0]).copy()
    C2 = (-dists[labels == 1][:, labels == 1]).copy()
    C12 = (-dists[labels == 0][:, labels == 1]).copy()

    def f(sigma):
        sigma = float(sigma)
        n0 = C1.shape[0]
        n1 = C2.shape[0]
        if C12.shape != (n0, n1):
            raise ValueError
        # Mean within-class and between-class kernel values at this sigma.
        within0 = np.sum(np.exp(C1 / sigma)) / n0
        within1 = np.sum(np.exp(C2 / sigma)) / n1
        between = np.sum(np.exp(C12 / sigma)) / n0 / n1
        return (n0 + n1 - within0 - within1) / (within0 / n0 + within1 / n1 - 2. * between)
    return f
class fisher_tuned_rbf_svm(object):
    '''
    F = fisher_tuned_rbf_svm(sigmas, base)

    Wrapper classifier: picks the RBF width from `sigmas` that minimises the
    Fisher-based criterion (sigma_value_fisher), installs the resulting RBF
    kernel on `base` and trains it.
    '''

    def __init__(self, sigmas, base):
        self.sigmas = sigmas
        self.base = base

    def train(self, features, labels, **kwargs):
        score = sigma_value_fisher(features, labels)
        scores = [score(s) for s in self.sigmas]
        # Remember the chosen width for later inspection.
        self.sigma = self.sigmas[np.argmin(scores)]
        self.base.set_option('kernel', rbf_kernel(self.sigma))
        return self.base.train(features, labels, **kwargs)
| 28.180357 | 126 | 0.595463 |
ace9a833470528de91b321bf86f904ed952c34ba | 577 | py | Python | tests/migrations/test_migrations_squashed/0002_second.py | fizista/django | 16f3a6a4c7bab11644d11c2be029374e5095cb56 | [
"BSD-3-Clause"
] | 1 | 2019-02-10T19:33:27.000Z | 2019-02-10T19:33:27.000Z | tests/migrations/test_migrations_squashed/0002_second.py | fizista/django | 16f3a6a4c7bab11644d11c2be029374e5095cb56 | [
"BSD-3-Clause"
] | null | null | null | tests/migrations/test_migrations_squashed/0002_second.py | fizista/django | 16f3a6a4c7bab11644d11c2be029374e5095cb56 | [
"BSD-3-Clause"
] | 1 | 2020-10-01T08:23:34.000Z | 2020-10-01T08:23:34.000Z | from django.db import migrations, models
class Migration(migrations.Migration):
    # Second step of the squashed-migration test set; applies on top of the
    # initial migration of the `migrations` test app.
    dependencies = [("migrations", "0001_initial")]
    operations = [
        migrations.DeleteModel("Tribble"),
        migrations.RemoveField("Author", "silly_field"),
        # Existing Author rows receive rating=0.
        migrations.AddField("Author", "rating", models.IntegerField(default=0)),
        migrations.CreateModel(
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                # Nullable FK: a Book may exist without an Author.
                ("author", models.ForeignKey("migrations.Author", null=True)),
            ],
        )
    ]
ace9a8fba12eb3e470290ac2a59278072dce539e | 93 | py | Python | Config/__init__.py | louisyoungx/tcp_transfer_server | e6f0e639a884caa65daa218bc32b9ef7711d6d31 | [
"MIT"
] | null | null | null | Config/__init__.py | louisyoungx/tcp_transfer_server | e6f0e639a884caa65daa218bc32b9ef7711d6d31 | [
"MIT"
] | null | null | null | Config/__init__.py | louisyoungx/tcp_transfer_server | e6f0e639a884caa65daa218bc32b9ef7711d6d31 | [
"MIT"
] | null | null | null | """
设置模块
"""
from .config import Config
from .direct import Direct
config = Config().direct
| 11.625 | 26 | 0.709677 |
ace9a9bee93e11e92eef9f1fcdbddb0d4a3009d9 | 713 | py | Python | nbs/dl2/exp/nb_02.py | osamak/course-v3 | 68ee8a707bf612a9c3b98c33d3de5aa1ae73cd30 | [
"Apache-2.0"
] | 5,111 | 2018-10-11T19:56:14.000Z | 2022-03-31T20:19:03.000Z | nbs/dl2/exp/nb_02.py | osamak/course-v3 | 68ee8a707bf612a9c3b98c33d3de5aa1ae73cd30 | [
"Apache-2.0"
] | 404 | 2018-10-20T14:52:58.000Z | 2021-06-17T23:47:36.000Z | nbs/dl2/exp/nb_02.py | osamak/course-v3 | 68ee8a707bf612a9c3b98c33d3de5aa1ae73cd30 | [
"Apache-2.0"
] | 4,381 | 2018-10-13T01:29:19.000Z | 2022-03-20T17:18:02.000Z |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/02_fully_connected.ipynb
from exp.nb_01 import *
def get_data():
    # Download (and cache) the gzipped MNIST pickle referenced by MNIST_URL.
    path = datasets.download_data(MNIST_URL, ext='.gz')
    with gzip.open(path, 'rb') as f:
        # latin-1 encoding is required to unpickle this Python-2-era pickle.
        ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')
    # Returns a lazy `map` of tensors; callers unpack it into four values.
    return map(tensor, (x_train,y_train,x_valid,y_valid))
def normalize(x, m, s):
    "Shift `x` by mean `m` and scale by standard deviation `s`."
    return (x - m) / s
def test_near_zero(a,tol=1e-3): assert a.abs()<tol, f"Near zero: {a}"
from torch.nn import init
def mse(output, targ):
    "Mean squared error between `output` (squeezed on its last dim) and `targ`."
    diff = output.squeeze(-1) - targ
    return diff.pow(2).mean()
from torch import nn | 31 | 88 | 0.582048 |
ace9a9f3cc30efdeddb14208871eb136a402e759 | 502 | py | Python | tests/conftest.py | timeoutdigital/treehugger | 0ea6ffa375e7836e1b396f0b0b8856d11f936d44 | [
"0BSD"
] | 11 | 2017-03-14T06:59:02.000Z | 2021-08-18T15:26:49.000Z | tests/conftest.py | timeoutdigital/treehugger | 0ea6ffa375e7836e1b396f0b0b8856d11f936d44 | [
"0BSD"
] | 28 | 2017-03-21T09:44:57.000Z | 2020-01-20T14:26:38.000Z | tests/conftest.py | timeoutdigital/treehugger | 0ea6ffa375e7836e1b396f0b0b8856d11f936d44 | [
"0BSD"
] | 3 | 2017-07-04T13:09:34.000Z | 2018-09-30T08:38:46.000Z | import pytest
from botocore.stub import Stubber
from treehugger.kms import kms_agent
from treehugger.s3 import s3_client
@pytest.fixture(scope='function', autouse=True)
def kms_stub():
    # Auto-applied per test: reset the cached KMS agent, then wrap its client
    # in a botocore Stubber so no real AWS call can escape.
    kms_agent.reset()
    with Stubber(kms_agent.kms_client) as stubber:
        yield stubber
        # Fail the test if responses were queued but never consumed.
        stubber.assert_no_pending_responses()
@pytest.fixture(scope='function', autouse=True)
def s3_stub():
    # Auto-applied per test: stub the shared S3 client so no real AWS call
    # can escape; fail if queued responses were never consumed.
    with Stubber(s3_client) as stubber:
        yield stubber
        stubber.assert_no_pending_responses()
| 23.904762 | 50 | 0.743028 |
ace9aa4a62ae7b4eb33f46404673e7b19f4efb1f | 1,929 | py | Python | setup.py | declankeyesbevan/py-dev-hammer | 6e5c8a0d35f70a082ec32aa94d01b15dfd8c440b | [
"MIT"
] | 2 | 2019-04-18T21:51:18.000Z | 2019-06-17T11:23:15.000Z | setup.py | declankeyesbevan/py-dev-hammer | 6e5c8a0d35f70a082ec32aa94d01b15dfd8c440b | [
"MIT"
] | null | null | null | setup.py | declankeyesbevan/py-dev-hammer | 6e5c8a0d35f70a082ec32aa94d01b15dfd8c440b | [
"MIT"
] | null | null | null | """
Sets up PyDevHammer so you can Hammer your Py Dev.
"""
# pylint: disable=redefined-builtin
from codecs import open
from os import path
from setuptools import setup, find_packages
HERE = path.abspath(path.dirname(__file__))
# The long description shown on PyPI is the README plus the changelog.
with open(path.join(HERE, 'README.md'), encoding='utf-8') as readme_file:
    README = readme_file.read()
with open(path.join(HERE, 'HISTORY.md'), encoding='utf-8') as history_file:
    HISTORY = history_file.read().replace('.. :changelog:', '')
ROOT_URL = 'https://github.com/declankeyesbevan/py-dev-hammer'
setup(
    name='pydevhammer',
    # Version is derived from git tags via setuptools_scm (see setup_requires).
    use_scm_version=True,
    description='Python development tools using AWS and GitHub',
    long_description=f'{README}\n{HISTORY}',
    long_description_content_type='text/markdown',
    url=ROOT_URL,
    author='Declan Keyes-Bevan',
    author_email='declankeyesbevan@users.noreply.github.com',
    license='MIT',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='github aws codebuild continuous-integration dev-tools',
    # Ship every package except the test suite.
    packages=find_packages(exclude=['tests']),
    install_requires=[
        'boto3',
        'botocore',
        'certifi',
        'coverage',
        'dpath',
        'linecounter',
        'pylint',
        'pytest',
        'PyYAML',
        'requests',
        'urllib3',
    ],
    setup_requires=[
        'setuptools_scm',
    ],
    python_requires='~=3.6',
    # Installs the `github_status_posting` console command.
    entry_points={
        'console_scripts': [
            'github_status_posting=py_dev_hammer.github_status_posting:entry_point',
        ]
    },
    project_urls={
        'Documentation': f'{ROOT_URL}/docs',
        'Issues': f'{ROOT_URL}/issues',
        'Source': ROOT_URL,
    },
)
| 29.227273 | 84 | 0.628823 |
ace9ab594ab7a7c06529c16ee77df6b092cc33eb | 14,481 | py | Python | node_manager_fkie/src/node_manager_fkie/topic_list_model.py | ahoarau/multimaster_fkie | 82bf341423bd3c2a15005c85eca9de5747cb8069 | [
"BSD-3-Clause"
] | null | null | null | node_manager_fkie/src/node_manager_fkie/topic_list_model.py | ahoarau/multimaster_fkie | 82bf341423bd3c2a15005c85eca9de5747cb8069 | [
"BSD-3-Clause"
] | 1 | 2018-04-20T13:03:34.000Z | 2018-04-20T13:03:34.000Z | node_manager_fkie/src/node_manager_fkie/topic_list_model.py | ahoarau/multimaster_fkie | 82bf341423bd3c2a15005c85eca9de5747cb8069 | [
"BSD-3-Clause"
] | null | null | null | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import Qt
from python_qt_binding.QtGui import QIcon, QStandardItem, QStandardItemModel
from detailed_msg_box import MessageBox
from master_discovery_fkie.master_info import TopicInfo
from node_manager_fkie.common import utf8
class TopicItem(QStandardItem):
    '''
    The topic item stored in the topic model. This class stores the topic as
    U{master_discovery_fkie.TopicInfo<http://docs.ros.org/kinetic/api/master_discovery_fkie/html/modules.html#master_discovery_fkie.master_info.TopicInfo>}.
    The name of the topic represented in HTML.
    '''

    ITEM_TYPE = QStandardItem.UserType + 36
    NAME_ROLE = Qt.UserRole + 1
    NODENAMES_ROLE = Qt.UserRole + 2
    # Column indices of a model row; column 0 is this name item itself.
    COL_PUB = 1
    COL_SUB = 2
    COL_TYPE = 3

    def __init__(self, name, topic=None, parent=None):
        '''
        Initialize the topic item.
        @param name: the topic name
        @type name: C{str}
        @param topic: the topic info
        @type topic: U{master_discovery_fkie.TopicInfo<http://docs.ros.org/kinetic/api/master_discovery_fkie/html/modules.html#master_discovery_fkie.master_info.TopicInfo>}
        '''
        QStandardItem.__init__(self, name)
        self.parent_item = parent
        self._publish_thread = None
        # Fall back to an empty TopicInfo if no topic info was supplied.
        self.topic = TopicInfo(name) if topic is None else topic

    def updateView(self):
        '''
        Updates all columns of the row: publisher count, subscriber count and
        topic type.
        '''
        self.updatePublisherView()
        self.updateSubscriberView()
        self.updateTypeView()

    def updatePublisherView(self):
        '''
        Updates the representation of the column contains the publisher state.
        '''
        if self.parent_item is not None:
            cfg_col = self.parent_item.child(self.row(), TopicItem.COL_PUB)
            if cfg_col is not None and isinstance(cfg_col, QStandardItem):
                cfg_col.setText(str(len(self.topic.publisherNodes)))
                # Tooltip lists all publisher nodes of this topic.
                tooltip = ''.join(['<h4>', 'Publisher [', self.topic.name, ']:</h4><dl>'])
                for p in self.topic.publisherNodes:
                    tooltip = ''.join([tooltip, '<dt>', p, '</dt>'])
                tooltip = ''.join([tooltip, '</dl>'])
                if len(self.topic.publisherNodes) > 0:
                    cfg_col.setToolTip(''.join(['<div>', tooltip, '</div>']))

    def updateSubscriberView(self):
        '''
        Updates the representation of the column contains the subscriber state.
        '''
        if self.parent_item is not None:
            cfg_col = self.parent_item.child(self.row(), TopicItem.COL_SUB)
            if cfg_col is not None and isinstance(cfg_col, QStandardItem):
                cfg_col.setText(str(len(self.topic.subscriberNodes)))
                # Tooltip lists all subscriber nodes of this topic.
                tooltip = ''.join(['<h4>', 'Subscriber [', self.topic.name, ']:</h4><dl>'])
                for p in self.topic.subscriberNodes:
                    tooltip = ''.join([tooltip, '<dt>', p, '</dt>'])
                tooltip = ''.join([tooltip, '</dl>'])
                if len(self.topic.subscriberNodes) > 0:
                    cfg_col.setToolTip(''.join(['<div>', tooltip, '</div>']))

    def updateTypeView(self):
        '''
        Updates the representation of the column contains the type of the topic.
        '''
        if self.parent_item is not None:
            cfg_col = self.parent_item.child(self.row(), TopicItem.COL_TYPE)
            if cfg_col is not None and isinstance(cfg_col, QStandardItem):
                cfg_col.setText(self.topic.type if self.topic.type and self.topic.type != 'None' else 'unknown type')

    # NOTE(review): the three _on_* slots below call self.updateIconView(),
    # which is defined neither on this class nor on QStandardItem -- confirm
    # whether it is provided elsewhere or a leftover; calling these slots
    # as-is would raise AttributeError.
    def _on_wait_for_publishing(self):
        self.updateIconView(QIcon(':/icons/state_off.png'))

    def _on_partial_publishing(self):
        self.updateIconView(QIcon(':/icons/state_part.png'))

    def _on_publishing(self):
        self.updateIconView(QIcon(':/icons/state_run.png'))

    def _publish_finished(self):
        # The publishing thread ended: drop the reference and clear the icon.
        self._publish_thread = None
        self.setIcon(QIcon())

    def show_error_msg(self, msg):
        # BUG FIX: the detailed text was wrapped in an undefined `tr(...)`
        # call, which raised NameError whenever an error should be displayed.
        MessageBox.warning(self, "Publish error",
                           'Error while publish to %s' % self.topic.name,
                           utf8(msg))

    def type(self):
        return TopicItem.ITEM_TYPE

    def data(self, role):
        if role == self.NAME_ROLE:
            return self.topic.name
        elif role == self.NODENAMES_ROLE:
            # Concatenated publisher and subscriber node names.
            return utf8(self.topic.publisherNodes) + utf8(self.topic.subscriberNodes)
        else:
            return QStandardItem.data(self, role)

    @classmethod
    def getItemList(cls, topic, root):
        '''
        Creates the list of the items from topic. This list is used for the
        visualization of topic data as a table row.
        @param topic: the topic info
        @type topic: U{master_discovery_fkie.TopicInfo<http://docs.ros.org/kinetic/api/master_discovery_fkie/html/modules.html#master_discovery_fkie.master_info.TopicInfo>}
        @param root: The parent QStandardItem
        @type root: U{QStandardItem<https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>}
        @return: the list for the representation as a row
        @rtype: C{[L{TopicItem} or U{QStandardItem<https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>}, ...]}
        '''
        items = []
        item = cls(topic.name, topic, parent=root)
        items.append(item)
        # Publisher/subscriber/type columns start empty; they are filled by
        # updateView() once the row is attached to the model.
        pubItem = QStandardItem()
        items.append(pubItem)
        subItem = QStandardItem()
        items.append(subItem)
        typeItem = QStandardItem()
        items.append(typeItem)
        return items
class TopicModel(QStandardItemModel):
    '''
    The model to manage the list with topics in ROS network.
    '''

    header = [('Name', 300),
              ('Publisher', 50),
              ('Subscriber', 50),
              ('Type', -1)]
    '''@ivar: the list with columns C{[(name, width), ...]}'''

    def __init__(self):
        '''
        Creates a new list model.
        '''
        QStandardItemModel.__init__(self)
        self.setColumnCount(len(TopicModel.header))
        self.setHorizontalHeaderLabels([label for label, _ in TopicModel.header])
        # workaround for using with PyQt: store the python object to keep the
        # defined attributes in the TopicItem subclass
        self.pyqt_workaround = dict()

    def flags(self, index):
        '''
        @param index: parent of the list
        @type index: U{QtCore.QModelIndex<https://srinikom.github.io/pyside-docs/PySide/QtCore/QModelIndex.html>}
        @return: Flag or the requested item
        @rtype: U{QtCore.Qt.ItemFlag<https://srinikom.github.io/pyside-docs/PySide/QtCore/Qt.html>}
        @see: U{http://www.pyside.org/docs/pyside-1.0.1/PySide/QtCore/Qt.html}
        '''
        if not index.isValid():
            return Qt.NoItemFlags
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable

    def updateModelData(self, topics, added_topics, updated_topics, removed_topics):
        '''
        Updates the topics model. New topic will be inserted in sorting order. Not
        available topics removed from the model.
        @param topics: The dictionary with topics
        @type topics: C{dict(topic name : U{master_discovery_fkie.TopicInfo<http://docs.ros.org/kinetic/api/master_discovery_fkie/html/modules.html#master_discovery_fkie.master_info.TopicInfo>}, ...)}
        @param added_topics: the list of new topics in the :topics: list
        @type added_topics: list or set
        @param updated_topics: the list of updated topics in the :topics: list
        @type updated_topics: list or set
        @param removed_topics: the list of removed topics in the :topics: list
        @type removed_topics: list or set
        '''
        root = self.invisibleRootItem()
        # Iterate in reverse so removing rows does not shift pending indices.
        for i in reversed(range(root.rowCount())):
            topicItem = root.child(i)
            if topicItem.topic.name in removed_topics:
                root.removeRow(i)
                try:
                    del self.pyqt_workaround[topicItem.topic.name]
                except KeyError:
                    # BUG FIX: was a bare `except:`; only a missing key is
                    # expected here.
                    pass
            elif topicItem.topic.name in updated_topics:
                topicItem.updateView()
        # insert new items in sorted order
        for topic_name in added_topics:
            try:
                doAddItem = True
                topic = topics[topic_name]
                for i in range(root.rowCount()):
                    if topic_name not in updated_topics:
                        topicItem = root.child(i)
                        # BUG FIX: replaced the Python-2-only `cmp(a, b) > 0`
                        # with a direct comparison (same ordering semantics).
                        if topicItem.topic.name > topic_name:
                            new_item_row = TopicItem.getItemList(topic, root)
                            root.insertRow(i, new_item_row)
                            # keep the python object alive (PyQt workaround)
                            self.pyqt_workaround[topic_name] = new_item_row[0]
                            new_item_row[0].updateView()
                            doAddItem = False
                            break
                    else:
                        doAddItem = False
                        break
                if doAddItem:
                    new_item_row = TopicItem.getItemList(topic, root)
                    root.appendRow(new_item_row)
                    self.pyqt_workaround[topic_name] = new_item_row[0]
                    new_item_row[0].updateView()
            except Exception:
                # Best effort: one malformed topic must not abort the whole
                # model update (was a bare `except:`).
                pass

    def index_from_names(self, publisher, subscriber):
        '''
        Returns for given topics the list of QModelIndex in this model.
        :param publisher: the list of publisher topics
        :type publisher: [str, ...]
        :param subscriber: the list of subscriber topics
        :type subscriber: [str, ...]
        :return: the list of QModelIndex
        :rtype: [QtCore.QModelIndex, ...]
        '''
        result = []
        root = self.invisibleRootItem()
        for i in range(root.rowCount()):
            topicItem = root.child(i)
            if topicItem.topic.name in publisher:
                result.append(self.index(i, 0))
                result.append(self.index(i, 1))  # select also the publishers column
            if topicItem.topic.name in subscriber:
                result.append(self.index(i, 0))
                result.append(self.index(i, 2))  # select also the subscribers column
        return result
| 43.61747 | 200 | 0.617499 |
ace9acb47717ed6c6c84d4647b78d2da9e3e89ef | 329 | py | Python | embedding/deep/my_django_bokeh_site/my_django_bokeh_site/asgi.py | Aquaveo/jupyter_to_tethys | b2fc246d31ef0526666bc5db856551ec9704f315 | [
"MIT"
] | null | null | null | embedding/deep/my_django_bokeh_site/my_django_bokeh_site/asgi.py | Aquaveo/jupyter_to_tethys | b2fc246d31ef0526666bc5db856551ec9704f315 | [
"MIT"
] | null | null | null | embedding/deep/my_django_bokeh_site/my_django_bokeh_site/asgi.py | Aquaveo/jupyter_to_tethys | b2fc246d31ef0526666bc5db856551ec9704f315 | [
"MIT"
] | 1 | 2019-02-12T19:17:54.000Z | 2019-02-12T19:17:54.000Z | """
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
import os
import django
from channels.routing import get_default_application
# Point Django at the site's settings module (only if not already set).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_django_bokeh_site.settings")
# Initialize Django's app registry before building the application.
django.setup()
# Module-level ASGI application object, per the ASGI_APPLICATION setting.
application = get_default_application()
| 25.307692 | 80 | 0.826748 |
ace9ad0d35d5ccc167a44d6d7fe89c724a104646 | 2,015 | py | Python | src/data_preparation/display_annotations.py | lukaszkepka/PostureGuard | ce603f8e802eba35729b25f03c763e2587f29f00 | [
"MIT"
] | 8 | 2021-03-24T15:26:58.000Z | 2022-03-13T23:17:56.000Z | src/data_preparation/display_annotations.py | lukaszkepka/PostureGuard | ce603f8e802eba35729b25f03c763e2587f29f00 | [
"MIT"
] | null | null | null | src/data_preparation/display_annotations.py | lukaszkepka/PostureGuard | ce603f8e802eba35729b25f03c763e2587f29f00 | [
"MIT"
] | 3 | 2021-12-23T10:36:45.000Z | 2022-01-24T06:55:34.000Z | import argparse
import os.path as path
import cv2
import pandas as pd
from numpy.core.multiarray import ndarray
from annotations import ImageAnnotation, data_frame_to_annotations_list
from drawing.image_overlay import ImageOverlayPipeline, TextImageOverlayStep, BoundingBoxImageOverlayStep, \
KeypointsImageOverlayStep
BOUNDING_BOX_COLOR = (0, 255, 255)
KEYPOINT_COLOR = (0, 255, 0)
TEXT_COLOR = (255, 0, 0)
def parse_args(argv=None):
    """Parse command-line arguments.

    :param argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
        Accepting an explicit list is a backward-compatible generalization that
        makes the function testable without touching ``sys.argv``.
    :return: argparse.Namespace with ``annotations_file_path``.
    """
    ap = argparse.ArgumentParser(description="Displays keypoints saved in annotations file")
    ap.add_argument("-i", "--annotations_file_path", required=True,
                    help="Path to annotations in csv format")
    return ap.parse_args(argv)
def put_annotations_on_image(image: ndarray, image_annotations: ImageAnnotation):
    """Draw the annotation's text labels, bounding box and keypoints onto *image* in place."""
    kp = image_annotations.keypoints
    overlay_steps = [
        TextImageOverlayStep([image_annotations.file_path, image_annotations.class_name], text_color=TEXT_COLOR),
        BoundingBoxImageOverlayStep(kp.bounding_box, color=BOUNDING_BOX_COLOR),
        KeypointsImageOverlayStep(kp, keypoint_color=KEYPOINT_COLOR, text_color=TEXT_COLOR),
    ]
    ImageOverlayPipeline(overlay_steps).apply(image)
def display_annotations(image_annotations: ImageAnnotation):
    """Load the annotated image from disk, draw its annotations and show it in a window (blocks on a key press)."""
    frame = cv2.imread(image_annotations.file_path)
    put_annotations_on_image(frame, image_annotations)
    cv2.imshow('annotations', frame)
    cv2.waitKey()
def main(args):
    """Validate the input path, load the annotations CSV and display each annotated image.

    :param args: parsed argparse namespace carrying ``annotations_file_path``.
    """
    if not path.exists(args.annotations_file_path):
        # Bug fix: the original message "File () doesn't exist" had no {}
        # placeholder, and referenced args.video_file_path, which does not
        # exist on the namespace (would raise AttributeError).
        print("File {} doesn't exist".format(args.annotations_file_path))
        return
    annotations_data_frame = pd.read_csv(args.annotations_file_path)
    annotations_list = data_frame_to_annotations_list(annotations_data_frame)
    for annotation in annotations_list:
        if not path.exists(annotation.file_path):
            # Same fix here; the missing file is the annotation's image file.
            print("File {} doesn't exist".format(annotation.file_path))
            continue
        display_annotations(annotation)
if __name__ == '__main__':
args = parse_args()
main(args)
| 32.5 | 113 | 0.755335 |
ace9aef06874b21a088eeeca1f7a48fcf1407f68 | 9,686 | py | Python | airflow_client/test/test_inline_response200.py | er1shivam/airflow-client-python | bf439f3f92b8248cb9f77adde250d4c4dbb860a9 | [
"Apache-2.0"
] | null | null | null | airflow_client/test/test_inline_response200.py | er1shivam/airflow-client-python | bf439f3f92b8248cb9f77adde250d4c4dbb860a9 | [
"Apache-2.0"
] | null | null | null | airflow_client/test/test_inline_response200.py | er1shivam/airflow-client-python | bf439f3f92b8248cb9f77adde250d4c4dbb860a9 | [
"Apache-2.0"
] | 1 | 2021-11-21T18:03:43.000Z | 2021-11-21T18:03:43.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Airflow API (Stable)
# Overview To facilitate management, Apache Airflow supports a range of REST API endpoints across its objects. This section provides an overview of the API design, methods, and supported use cases. Most of the endpoints accept `JSON` as input and return `JSON` responses. This means that you must usually add the following headers to your request: ``` Content-type: application/json Accept: application/json ``` ## Resources The term `resource` refers to a single type of object in the Airflow metadata. An API is broken up by its endpoint's corresponding resource. The name of a resource is typically plural and expressed in camelCase. Example: `dagRuns`. Resource names are used as part of endpoint URLs, as well as in API parameters and responses. ## CRUD Operations The platform supports **C**reate, **R**ead, **U**pdate, and **D**elete operations on most resources. You can review the standards for these operations and their standard parameters below. Some endpoints have special behavior as exceptions. ### Create To create a resource, you typically submit an HTTP `POST` request with the resource's required metadata in the request body. The response returns a `201 Created` response code upon success with the resource's metadata, including its internal `id`, in the response body. ### Read The HTTP `GET` request can be used to read a resource or to list a number of resources. A resource's `id` can be submitted in the request parameters to read a specific resource. The response usually returns a `200 OK` response code upon success, with the resource's metadata in the response body. If a `GET` request does not include a specific resource `id`, it is treated as a list request. The response usually returns a `200 OK` response code upon success, with an object containing a list of resources' metadata in the response body. When reading resources, some common query parameters are usually available. 
e.g.: ``` v1/connections?limit=25&offset=25 ``` |Query Parameter|Type|Description| |---------------|----|-----------| |limit|integer|Maximum number of objects to fetch. Usually 25 by default| |offset|integer|Offset after which to start returning objects. For use with limit query parameter.| ### Update Updating a resource requires the resource `id`, and is typically done using an HTTP `PATCH` request, with the fields to modify in the request body. The response usually returns a `200 OK` response code upon success, with information about the modified resource in the response body. ### Delete Deleting a resource requires the resource `id` and is typically executing via an HTTP `DELETE` request. The response usually returns a `204 No Content` response code upon success. ## Conventions - Resource names are plural and expressed in camelCase. - Names are consistent between URL parameter name and field name. - Field names are in snake_case. ```json { \"name\": \"string\", \"slots\": 0, \"occupied_slots\": 0, \"used_slots\": 0, \"queued_slots\": 0, \"open_slots\": 0 } ``` ### Update Mask Update mask is available as a query parameter in patch endpoints. It is used to notify the API which fields you want to update. Using `update_mask` makes it easier to update objects by helping the server know which fields to update in an object instead of updating all fields. The update request ignores any fields that aren't specified in the field mask, leaving them with their current values. Example: ``` resource = request.get('/resource/my-id').json() resource['my_field'] = 'new-value' request.patch('/resource/my-id?update_mask=my_field', data=json.dumps(resource)) ``` ## Versioning and Endpoint Lifecycle - API versioning is not synchronized to specific releases of the Apache Airflow. - APIs are designed to be backward compatible. - Any changes to the API will first go through a deprecation phase. 
# Summary of Changes | Airflow version | Description | |-|-| | v2.0 | Initial release | | v2.0.2 | Added /plugins endpoint | | v2.1 | New providers endpoint | # Trying the API You can use a third party airflow_client.client, such as [curl](https://curl.haxx.se/), [HTTPie](https://httpie.org/), [Postman](https://www.postman.com/) or [the Insomnia rest airflow_client.client](https://insomnia.rest/) to test the Apache Airflow API. Note that you will need to pass credentials data. For e.g., here is how to pause a DAG with [curl](https://curl.haxx.se/), when basic authorization is used: ```bash curl -X POST 'https://example.com/api/v1/dags/{dag_id}?update_mask=is_paused' \\ -H 'Content-Type: application/json' \\ --user \"username:password\" \\ -d '{ \"is_paused\": true }' ``` Using a graphical tool such as [Postman](https://www.postman.com/) or [Insomnia](https://insomnia.rest/), it is possible to import the API specifications directly: 1. Download the API specification by clicking the **Download** button at top of this document 2. Import the JSON specification in the graphical tool of your choice. - In *Postman*, you can click the **import** button at the top - With *Insomnia*, you can just drag-and-drop the file on the UI Note that with *Postman*, you can also generate code snippets by selecting a request and clicking on the **Code** button. ## Enabling CORS [Cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) is a browser security feature that restricts HTTP requests that are initiated from scripts running in the browser. For details on enabling/configuring CORS, see [Enabling CORS](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Authentication To be able to meet the requirements of many organizations, Airflow supports many authentication methods, and it is even possible to add your own method. 
If you want to check which auth backend is currently set, you can use `airflow config get-value api auth_backend` command as in the example below. ```bash $ airflow config get-value api auth_backend airflow.api.auth.backend.basic_auth ``` The default is to deny all requests. For details on configuring the authentication, see [API Authorization](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Errors We follow the error response format proposed in [RFC 7807](https://tools.ietf.org/html/rfc7807) also known as Problem Details for HTTP APIs. As with our normal API responses, your airflow_client.client must be prepared to gracefully handle additional members of the response. ## Unauthenticated This indicates that the request has not been applied because it lacks valid authentication credentials for the target resource. Please check that you have valid credentials. ## PermissionDenied This response means that the server understood the request but refuses to authorize it because it lacks sufficient rights to the resource. It happens when you do not have the necessary permission to execute the action you performed. You need to get the appropriate permissions in other to resolve this error. ## BadRequest This response means that the server cannot or will not process the request due to something that is perceived to be a airflow_client.client error (e.g., malformed request syntax, invalid request message framing, or deceptive request routing). To resolve this, please ensure that your syntax is correct. ## NotFound This airflow_client.client error response indicates that the server cannot find the requested resource. ## MethodNotAllowed Indicates that the request method is known by the server but is not supported by the target resource. 
## NotAcceptable The target resource does not have a current representation that would be acceptable to the user agent, according to the proactive negotiation header fields received in the request, and the server is unwilling to supply a default representation. ## AlreadyExists The request could not be completed due to a conflict with the current state of the target resource, meaning that the resource already exists ## Unknown This means that the server encountered an unexpected condition that prevented it from fulfilling the request. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: dev@airflow.apache.org
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import airflow_client.client
from airflow_client.client.model.inline_response200 import InlineResponse200
class TestInlineResponse200(unittest.TestCase):
    """Unit test stubs for the InlineResponse200 model."""

    def setUp(self):
        """No fixtures are required for the stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testInlineResponse200(self):
        """Placeholder test for constructing InlineResponse200."""
        # FIXME: construct object with mandatory attributes with example values
        # model = InlineResponse200() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 179.37037 | 8,160 | 0.760892 |
ace9af62953d6b85817b516303ec57b864d8f865 | 9,674 | py | Python | generator_utils.py | StuartCHAN/neural-qa | 42bbd997757bbea57f71398c4dd52d469a6916e9 | [
"MIT"
] | 5 | 2020-04-08T16:12:27.000Z | 2021-05-14T14:05:06.000Z | generator_utils.py | StuartCHAN/neural-qa | 42bbd997757bbea57f71398c4dd52d469a6916e9 | [
"MIT"
] | null | null | null | generator_utils.py | StuartCHAN/neural-qa | 42bbd997757bbea57f71398c4dd52d469a6916e9 | [
"MIT"
] | 1 | 2020-07-03T13:59:11.000Z | 2020-07-03T13:59:11.000Z | #!/usr/bin/env python
"""
Neural SPARQL Machines - Generator utils.
'SPARQL as a Foreign Language' by Tommaso Soru and Edgard Marx et al., SEMANTiCS 2017
https://w3id.org/neural-sparql-machines/soru-marx-semantics2017.html
https://arxiv.org/abs/1708.07624
Version 0.0.4
"""
import collections
import http.client as httplib
import json
import logging
import re
import sys
import urllib
import urllib.request as urllib2
from functools import reduce
ENDPOINT = "http://dbpedia.org/sparql"
GRAPH = "http://dbpedia.org"
def log_statistics(used_resources, special_classes, not_instanced_templates):
    """Log summary statistics about resource usage, special classes and
    templates that could not be instantiated. Side effects only (logging);
    returns None.

    :param used_resources: mapping resource -> number of placeholder fills
    :param special_classes: mapping class name -> list of members
    :param not_instanced_templates: mapping template -> count
    """
    resource_count = len(used_resources)
    filled_positions = sum(used_resources.values())
    # Distribution: how many resources were used exactly k times, per k.
    examples_per_instance = collections.Counter(used_resources.values())
    logging.info('{:6d} used resources in {} placeholder positions'.format(resource_count, filled_positions))
    for usage in examples_per_instance:
        logging.info('{:6d} resources occur \t{:6d} times \t({:6.2f} %) '.format(
            examples_per_instance[usage], usage, examples_per_instance[usage] * 100 / resource_count))
    for cl in special_classes:
        logging.info('{} contains: {}'.format(cl, ', '.join(special_classes[cl])))
    logging.info('{:6d} not instanciated templates:'.format(sum(not_instanced_templates.values())))
    for template in not_instanced_templates:
        logging.info('{}'.format(template))
def save_cache(file, cache):
    """Persist a collections.Counter to *file* as JSON, most-common entries first."""
    with open(file, 'w') as outfile:
        json.dump(collections.OrderedDict(cache.most_common()), outfile)
def query_dbpedia(query):
    """Run *query* against the DBpedia SPARQL endpoint and return the parsed JSON.

    On HTTP errors / bad status lines the function degrades to an empty
    result set instead of raising.
    """
    param = {
        "default-graph-uri": GRAPH,
        "query": query,
        "format": "JSON",
        "CXML_redir_for_subjs": "121",
        "CXML_redir_for_hrefs": "",
        "timeout": "600",
        "debug": "on",
    }
    try:
        resp = urllib2.urlopen(ENDPOINT + "?" + urllib.parse.urlencode(param))
        print("\nparam--> ", param)
        j = resp.read()
        print("\nj-result--> ", j)
        resp.close()
    except (urllib2.HTTPError, httplib.BadStatusLine):
        logging.debug("*** Query error. Empty result set. ***")
        j = '{ "results": { "bindings": [] } }'
    sys.stdout.flush()
    return json.loads(j)
def strip_brackets(s):
    """Drop parenthesised text, keep only the part before the first comma, then strip and lowercase."""
    without_parens = re.sub(r'\([^)]*\)', '', s)
    before_comma = without_parens.split(",", 1)[0]
    return before_comma.strip().lower()
# Encoding table used by do_replacements()/encode(): in each row, every
# element except the last is an "original" SPARQL spelling, and the last
# element is the single token it is encoded to. reverse_replacements()
# maps each token back to the FIRST spelling of its row.
REPLACEMENTS = [
['dbo:', 'http://dbpedia.org/ontology/', 'dbo_'],
['dbp:', 'http://dbpedia.org/property/', 'dbp_'],
['dbc:', 'http://dbpedia.org/resource/Category:', 'dbc_'],
['dbr:', 'res:', 'http://dbpedia.org/resource/', 'dbr_'],
['dct:', 'dct_'],
['geo:', 'geo_'],
['georss:', 'georss_'],
['rdf:', 'rdf_'],
['rdfs:', 'rdfs_'],
['foaf:', 'foaf_'],
['owl:', 'owl_'],
['yago:', 'yago_'],
['skos:', 'skos_'],
[' ( ', ' par_open '],
[' ) ', ' par_close '],
['(', ' attr_open '],
[') ', ')', ' attr_close '],
['{', ' brack_open '],
['}', ' brack_close '],
[' . ', ' sep_dot '],
['. ', ' sep_dot '],
['?', 'var_'],
['*', 'wildcard'],
[' <= ', ' math_leq '],
[' >= ', ' math_geq '],
[' < ', ' math_lt '],
[' > ', ' math_gt ']
]
# Canonicalization table used by normalize_predicates(): each alternative
# predicate spelling (values) is replaced by its canonical key.
STANDARDS = {
'dbo_almaMater': ['dbp_almaMater'],
'dbo_award': ['dbp_awards'],
'dbo_birthPlace': ['dbp_birthPlace', 'dbp_placeOfBirth'],
'dbo_deathPlace': ['dbp_deathPlace', 'dbp_placeOfDeath'],
'dbo_child': ['dbp_children'],
'dbo_college': ['dbp_college'],
'dbo_hometown': ['dbp_hometown'],
'dbo_nationality': ['dbo_stateOfOrigin'],
'dbo_relative': ['dbp_relatives'],
'dbo_restingPlace': ['dbp_restingPlaces', 'dbp_placeOfBurial', 'dbo_placeOfBurial', 'dbp_restingplace'],
'dbo_spouse': ['dbp_spouse'],
'dbo_partner': ['dbp_partner']
}
def encode(sparql):
    """Translate a raw SPARQL string into the token-encoded, normalized form."""
    replaced = do_replacements(sparql)
    shortened = shorten_query(replaced)
    return normalize_predicates(shortened)
def decode(encoded_sparql):
    """Invert encode(): restore a token-encoded query back to plain SPARQL."""
    return reverse_shorten_query(reverse_replacements(encoded_sparql))
def normalize_predicates(sparql):
    """Replace alternative predicate spellings with their canonical form from STANDARDS."""
    for canonical, alternatives in STANDARDS.items():
        for alternative in alternatives:
            sparql = sparql.replace(alternative, canonical)
    return sparql
def do_replacements(sparql):
    """Apply the REPLACEMENTS table: every original spelling in a row becomes the row's final token."""
    for row in REPLACEMENTS:
        token = row[-1]
        for original in row[:-1]:
            sparql = sparql.replace(original, token)
    return sparql
def reverse_replacements(sparql):
    """Map every encoded token (and its whitespace-stripped variant) back to the first spelling of its row."""
    for row in REPLACEMENTS:
        original, token = row[0], row[-1]
        sparql = sparql.replace(token, original)
        # tokens carry surrounding spaces; also replace the bare token
        sparql = sparql.replace(token.strip(), original)
    return sparql
def shorten_query(sparql):
    """Collapse verbose ORDER BY clauses into the compact _obd_/_oba_ tokens."""
    rules = (
        (r'order by desc\s+....?_open\s+([\S]+)\s+....?_close', '_obd_ \\1'),
        (r'order by asc\s+....?_open\s+([\S]+)\s+....?_close', '_oba_ \\1'),
        (r'order by\s+([\S]+)', '_oba_ \\1'),
    )
    for pattern, replacement in rules:
        sparql = re.sub(pattern, replacement, sparql, flags=re.IGNORECASE)
    return sparql
def reverse_shorten_query(sparql):
    """Expand the compact _oba_/_obd_ tokens back into full ORDER BY clauses."""
    for token, clause in (('_oba_', 'order by asc'), ('_obd_', 'order by desc')):
        sparql = re.sub(token + r' ([\S]+)', clause + ' (\\1)', sparql, flags=re.IGNORECASE)
    return sparql
def read_template_file(file):
    """Parse a semicolon-separated template file into a list of Annotation objects.

    Column layout per line: three (possibly empty) target classes, the natural
    language question, the SPARQL query, the generator query and an optional
    id (defaults to the 1-based line number).
    """
    annotations = []
    with open(file) as f:
        for line_number, line in enumerate(f, start=1):
            values = line[:-1].split(';')
            target_classes = [values[0] or None, values[1] or None, values[2] or None]
            question, query, generator_query = values[3], values[4], values[5]
            id = values[6] if (len(values) >= 7 and values[6]) else line_number
            annotations.append(Annotation(question, query, generator_query, id, target_classes))
    return annotations
class Annotation:
    """One template row: a NL question, its SPARQL query, the generator query and metadata."""

    def __init__(self, question, query, generator_query, id=None, target_classes=None):
        """
        :param id: template identifier (callers default it to the line number)
        :param target_classes: up to three target classes; defaults to []
        """
        self.question = question
        self.query = query
        self.generator_query = generator_query
        self.id = id
        # PEP 8 (E711): compare to None with `is not`, not `!=`.
        self.target_classes = target_classes if target_classes is not None else []
        # variable letters mentioned in the generator query before WHERE
        self.variables = extract_variables(generator_query)
def extract_variables(query):
    """Return the single-letter variable names appearing before the first WHERE keyword; [] when there is none."""
    head = re.search(r'^.*?where', query, re.IGNORECASE)
    if not head:
        return []
    return re.findall(r'\?(\w)', head.group(0))
def extract_encoded_entities(encoded_sparql):
    """Decode the query, extract its entities and return them re-encoded (lazily, as a map object)."""
    return map(encode, extract_entities(decode(encoded_sparql)))
def extract_entities(sparql):
    """Collect subjects/objects of all triples that look like entities (prefixed, not variables)."""
    entities = set()
    for triple in extractTriples(sparql):
        for term in (triple['subject'], triple['object']):
            if not term.startswith('?') and ':' in term:
                # strip a leading "optional{" that can cling to a subject
                entities.add(re.sub(r'^optional{', '', term, flags=re.IGNORECASE))
    return entities
def extract_predicates(sparql):
    """Return the set of predicates of all triples in the query."""
    return {triple['predicate'] for triple in extractTriples(sparql)}
def extractTriples(sparqlQuery):
    """Extract the triples of the first WHERE { ... } block; [] when there is none."""
    where_match = re.search(r'where\s*?{(.*?)}', sparqlQuery, re.IGNORECASE)
    if where_match:
        return splitIntoTriples(where_match.group(1))
    return []
def splitIntoTriples(whereStatement):
    """Split the body of a WHERE clause into its triples.

    The statement is split on '.'-based separators (keeping the separator),
    the pieces are re-joined by a left fold, and each resulting statement is
    parsed by splitIntoTripleParts. Returns a lazy filter of triple dicts.
    """
    tripleAndSeparators = re.split('(\.[\s\?\<$])', whereStatement)
    trimmed = map(lambda part: part.strip(), tripleAndSeparators)

    def repair(acc, element):
        # Fold the alternating [text, separator, text, ...] stream back into
        # complete triple statements. Avoids shadowing builtins `list`/`str`.
        if element in ['.', '.?', '.<']:
            acc.append(element)
            return acc
        previous = acc[-1]
        del acc[-1]
        if previous in ['.', '.?', '.<']:
            # the separator swallowed the first char of this statement; restore it
            cutoff = previous[1] if previous in ['.?', '.<'] else ''
            acc.append(cutoff + element)
        else:
            acc.append(previous + ' ' + element)
        return acc

    tripleStatements = reduce(repair, trimmed, [''])
    triplesWithNones = map(splitIntoTripleParts, tripleStatements)
    return filter(lambda triple: triple != None, triplesWithNones)
def splitIntoTripleParts(triple):
    """Split one triple statement into subject/predicate/object; None when it lacks three parts."""
    match = re.search(r'(\S+)\s+(\S+)\s+(\S+)', triple)
    if not match:
        return None
    subject, predicate, obj = match.group(1), match.group(2), match.group(3)
    return {'subject': subject, 'predicate': predicate, 'object': obj}
def fix_URI(query):
    """Expand dbr: prefixes to full resource URIs and move a '>' that ended up outside a closing brace."""
    expanded = re.sub(r"dbr:([^\s]+)" , r"<http://dbpedia.org/resource/\1>" , query)
    if expanded.endswith("}>"):
        expanded = expanded[:-2] + ">}"
    return expanded
| 33.243986 | 178 | 0.628385 |
ace9b045452cfb3fa122f43754c00724a723337d | 373 | py | Python | counter.py | broccoli-e/chibi | 8534ecd8f0b72d228d1fbf246a7c667367c3e6d7 | [
"MIT"
] | null | null | null | counter.py | broccoli-e/chibi | 8534ecd8f0b72d228d1fbf246a7c667367c3e6d7 | [
"MIT"
] | null | null | null | counter.py | broccoli-e/chibi | 8534ecd8f0b72d228d1fbf246a7c667367c3e6d7 | [
"MIT"
] | null | null | null | class Counter(object):
def __init__(self):
    # every counter starts at zero
    self.cnt = 0

def count(self):
    """Increment the counter by one."""
    self.cnt += 1

def doublecount(self):
    """Increment the counter by two."""
    self.cnt += 2

def reset(self):
    """Set the counter back to zero."""
    self.cnt = 0

def show(self):
    """Print the current value to stdout."""
    print(self.cnt)

def __repr__(self):
    return str(self.cnt)
# Module-level demo: create a Counter, bump it by two, and display it.
c=Counter()
c.show()
c.doublecount()
c.show()
print(type(c))
print(c) | 19.631579 | 28 | 0.55496 |
ace9b04cec446a3cddc325d24c9004e091c6d98d | 29,706 | py | Python | frappe/model/db_query.py | abhishekbalam/frappe | 75326b2f23e433f356b699c4f7aa954d329150c5 | [
"MIT"
] | 1 | 2020-11-28T14:00:37.000Z | 2020-11-28T14:00:37.000Z | frappe/model/db_query.py | abhishekbalam/frappe | 75326b2f23e433f356b699c4f7aa954d329150c5 | [
"MIT"
] | null | null | null | frappe/model/db_query.py | abhishekbalam/frappe | 75326b2f23e433f356b699c4f7aa954d329150c5 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six import iteritems, string_types
"""build query for doclistview and return results"""
import frappe.defaults
import frappe.share
from frappe import _
import frappe.permissions
from datetime import datetime
import frappe, json, copy, re
from frappe.model import optional_fields
from frappe.client import check_parent_permission
from frappe.model.utils.user_settings import get_user_settings, update_user_settings
from frappe.utils import flt, cint, get_time, make_filter_tuple, get_filter, add_to_date, cstr, get_timespan_date_range
from frappe.model.meta import get_table_columns
class DatabaseQuery(object):
def __init__(self, doctype, user=None):
self.doctype = doctype
self.tables = []
self.conditions = []
self.or_conditions = []
self.fields = None
self.user = user or frappe.session.user
self.ignore_ifnull = False
self.flags = frappe._dict()
self.reference_doctype = None
def execute(self, query=None, fields=None, filters=None, or_filters=None,
docstatus=None, group_by=None, order_by=None, limit_start=False,
limit_page_length=None, as_list=False, with_childnames=False, debug=False,
ignore_permissions=False, user=None, with_comment_count=False,
join='left join', distinct=False, start=None, page_length=None, limit=None,
ignore_ifnull=False, save_user_settings=False, save_user_settings_fields=False,
update=None, add_total_row=None, user_settings=None, reference_doctype=None,
return_query=False, strict=True, pluck=None, ignore_ddl=False):
if not ignore_permissions and not frappe.has_permission(self.doctype, "read", user=user):
frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(self.doctype))
raise frappe.PermissionError(self.doctype)
# filters and fields swappable
# its hard to remember what comes first
if (isinstance(fields, dict)
or (isinstance(fields, list) and fields and isinstance(fields[0], list))):
# if fields is given as dict/list of list, its probably filters
filters, fields = fields, filters
elif fields and isinstance(filters, list) \
and len(filters) > 1 and isinstance(filters[0], string_types):
# if `filters` is a list of strings, its probably fields
filters, fields = fields, filters
if fields:
self.fields = fields
else:
if pluck:
self.fields = ["`tab{0}`.`{1}`".format(self.doctype, pluck)]
else:
self.fields = ["`tab{0}`.`name`".format(self.doctype)]
if start: limit_start = start
if page_length: limit_page_length = page_length
if limit: limit_page_length = limit
self.filters = filters or []
self.or_filters = or_filters or []
self.docstatus = docstatus or []
self.group_by = group_by
self.order_by = order_by
self.limit_start = 0 if (limit_start is False) else cint(limit_start)
self.limit_page_length = cint(limit_page_length) if limit_page_length else None
self.with_childnames = with_childnames
self.debug = debug
self.join = join
self.distinct = distinct
self.as_list = as_list
self.ignore_ifnull = ignore_ifnull
self.flags.ignore_permissions = ignore_permissions
self.user = user or frappe.session.user
self.update = update
self.user_settings_fields = copy.deepcopy(self.fields)
self.return_query = return_query
self.strict = strict
self.ignore_ddl = ignore_ddl
# for contextual user permission check
# to determine which user permission is applicable on link field of specific doctype
self.reference_doctype = reference_doctype or self.doctype
if user_settings:
self.user_settings = json.loads(user_settings)
self.columns = self.get_table_columns()
# no table & ignore_ddl, return
if not self.columns: return []
if query:
result = self.run_custom_query(query)
else:
result = self.build_and_run()
if return_query:
return result
if with_comment_count and not as_list and self.doctype:
self.add_comment_count(result)
if save_user_settings:
self.save_user_settings_fields = save_user_settings_fields
self.update_user_settings()
if pluck:
return [d[pluck] for d in result]
return result
def build_and_run(self):
args = self.prepare_args()
args.limit = self.add_limit()
if args.conditions:
args.conditions = "where " + args.conditions
if self.distinct:
args.fields = 'distinct ' + args.fields
args.order_by = '' # TODO: recheck for alternative
query = """select %(fields)s
from %(tables)s
%(conditions)s
%(group_by)s
%(order_by)s
%(limit)s""" % args
if self.return_query:
return query
else:
return frappe.db.sql(query, as_dict=not self.as_list, debug=self.debug,
update=self.update, ignore_ddl=self.ignore_ddl)
def prepare_args(self):
self.parse_args()
self.sanitize_fields()
self.extract_tables()
self.set_optional_columns()
self.build_conditions()
args = frappe._dict()
if self.with_childnames:
for t in self.tables:
if t != "`tab" + self.doctype + "`":
self.fields.append(t + ".name as '%s:name'" % t[4:-1])
# query dict
args.tables = self.tables[0]
# left join parent, child tables
for child in self.tables[1:]:
args.tables += " {join} {child} on ({child}.parent = {main}.name)".format(join=self.join,
child=child, main=self.tables[0])
if self.grouped_or_conditions:
self.conditions.append("({0})".format(" or ".join(self.grouped_or_conditions)))
args.conditions = ' and '.join(self.conditions)
if self.or_conditions:
args.conditions += (' or ' if args.conditions else "") + \
' or '.join(self.or_conditions)
self.set_field_tables()
fields = []
# Wrapping fields with grave quotes to allow support for sql keywords
# TODO: Add support for wrapping fields with sql functions and distinct keyword
for field in self.fields:
stripped_field = field.strip().lower()
skip_wrapping = any([
stripped_field.startswith(("`", "*", '"', "'")),
"(" in stripped_field,
"distinct" in stripped_field,
])
if skip_wrapping:
fields.append(field)
elif "as" in field.lower().split(" "):
col, _, new = field.split()
fields.append("`{0}` as {1}".format(col, new))
else:
fields.append("`{0}`".format(field))
args.fields = ", ".join(fields)
self.set_order_by(args)
self.validate_order_by_and_group_by(args.order_by)
args.order_by = args.order_by and (" order by " + args.order_by) or ""
self.validate_order_by_and_group_by(self.group_by)
args.group_by = self.group_by and (" group by " + self.group_by) or ""
return args
def parse_args(self):
"""Convert fields and filters from strings to list, dicts"""
if isinstance(self.fields, string_types):
if self.fields == "*":
self.fields = ["*"]
else:
try:
self.fields = json.loads(self.fields)
except ValueError:
self.fields = [f.strip() for f in self.fields.split(",")]
# remove empty strings / nulls in fields
self.fields = [f for f in self.fields if f]
for filter_name in ["filters", "or_filters"]:
filters = getattr(self, filter_name)
if isinstance(filters, string_types):
filters = json.loads(filters)
if isinstance(filters, dict):
fdict = filters
filters = []
for key, value in iteritems(fdict):
filters.append(make_filter_tuple(self.doctype, key, value))
setattr(self, filter_name, filters)
def sanitize_fields(self):
'''
regex : ^.*[,();].*
purpose : The regex will look for malicious patterns like `,`, '(', ')', '@', ;' in each
field which may leads to sql injection.
example :
field = "`DocType`.`issingle`, version()"
As field contains `,` and mysql function `version()`, with the help of regex
the system will filter out this field.
'''
sub_query_regex = re.compile("^.*[,();@].*")
blacklisted_keywords = ['select', 'create', 'insert', 'delete', 'drop', 'update', 'case', 'show']
blacklisted_functions = ['concat', 'concat_ws', 'if', 'ifnull', 'nullif', 'coalesce',
'connection_id', 'current_user', 'database', 'last_insert_id', 'session_user',
'system_user', 'user', 'version', 'global']
def _raise_exception():
frappe.throw(_('Use of sub-query or function is restricted'), frappe.DataError)
def _is_query(field):
if re.compile(r"^(select|delete|update|drop|create)\s").match(field):
_raise_exception()
elif re.compile(r"\s*[0-9a-zA-z]*\s*( from | group by | order by | where | join )").match(field):
_raise_exception()
for field in self.fields:
if sub_query_regex.match(field):
if any(keyword in field.lower().split() for keyword in blacklisted_keywords):
_raise_exception()
if any("({0}".format(keyword) in field.lower() for keyword in blacklisted_keywords):
_raise_exception()
if any("{0}(".format(keyword) in field.lower() for keyword in blacklisted_functions):
_raise_exception()
if '@' in field.lower():
# prevent access to global variables
_raise_exception()
if re.compile(r"[0-9a-zA-Z]+\s*'").match(field):
_raise_exception()
if re.compile(r"[0-9a-zA-Z]+\s*,").match(field):
_raise_exception()
_is_query(field)
if self.strict:
if re.compile(r".*/\*.*").match(field):
frappe.throw(_('Illegal SQL Query'))
if re.compile(r".*\s(union).*\s").match(field.lower()):
frappe.throw(_('Illegal SQL Query'))
def extract_tables(self):
    """Seed `self.tables` with the main doctype table, then scan the selected
    fields for qualified `tabX.col` style references and register each new
    table via `append_table` (which also enforces read permission)."""
    self.tables = ['`tab' + self.doctype + '`']

    if not self.fields:
        return

    # fields wrapped in these SQL functions are plain expressions, not joins
    skip_markers = ('locate(', 'strpos(', 'count(', 'avg(', 'sum(',
        'extract(', 'dayofyear(')

    for field in self.fields:
        # only consider fields that look like a qualified table reference
        if not ('tab' in field and '.' in field):
            continue
        if any(marker in field for marker in skip_markers):
            continue

        candidate = field.split('.')[0]
        # peel off wrappers that may precede the table name
        for wrapper in ('group_concat(', 'ifnull('):
            if candidate.lower().startswith(wrapper):
                candidate = candidate[len(wrapper):]
        if candidate[0] != '`':
            candidate = '`' + candidate + '`'
        if candidate not in self.tables:
            self.append_table(candidate)
def append_table(self, table_name):
    """Track *table_name* for the query and enforce read permission on its
    doctype (unless permission checks are being ignored)."""
    self.tables.append(table_name)
    doctype = table_name[4:-1]  # strip the '`tab' prefix and trailing '`'

    if self.flags.ignore_permissions or frappe.has_permission(doctype):
        return

    frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(doctype))
    raise frappe.PermissionError(doctype)
def set_field_tables(self):
    """Qualify bare fieldnames with the primary table when the query joins
    more than one table, so column references stay unambiguous. Fields that
    start with a standard SQL aggregate are left untouched."""
    if len(self.tables) <= 1:
        return

    aggregates = ('count(', 'avg(', 'sum(', 'extract(', 'dayofyear(')
    primary = self.tables[0]
    for position, fieldname in enumerate(self.fields):
        is_qualified = '.' in fieldname
        is_aggregate = fieldname.lower().startswith(aggregates)
        if not is_qualified and not is_aggregate:
            self.fields[position] = '{0}.{1}'.format(primary, fieldname)
def get_table_columns(self):
    """Return the column list for the main doctype (via the module-level
    `get_table_columns` helper). A missing table is tolerated when
    `ignore_ddl` is set and yields None; otherwise the error propagates."""
    try:
        return get_table_columns(self.doctype)
    except frappe.db.TableMissingError:
        if not self.ignore_ddl:
            raise
        return None
def set_optional_columns(self):
    """Removes optional columns like `_user_tags`, `_comments` etc. if not in table"""
    # remove from fields: collect first, then delete, to avoid mutating
    # self.fields while iterating it
    to_remove = []
    for fld in self.fields:
        for f in optional_fields:
            if f in fld and not f in self.columns:
                to_remove.append(fld)

    for fld in to_remove:
        del self.fields[self.fields.index(fld)]

    # remove from filters: same two-pass pattern
    to_remove = []
    for each in self.filters:
        if isinstance(each, string_types):
            # normalize a bare string filter to a one-element list for the scan
            each = [each]

        for element in each:
            if element in optional_fields and element not in self.columns:
                to_remove.append(each)

    for each in to_remove:
        # self.filters may be a dict (key removal) or a list (value removal)
        if isinstance(self.filters, dict):
            del self.filters[each]
        else:
            self.filters.remove(each)
def build_conditions(self):
    """Populate `self.conditions` and `self.grouped_or_conditions` from the
    user-supplied filters, then append permission match conditions unless
    permission checks are explicitly ignored."""
    self.conditions = []
    self.grouped_or_conditions = []
    self.build_filter_conditions(self.filters, self.conditions)
    self.build_filter_conditions(self.or_filters, self.grouped_or_conditions)

    if self.flags.ignore_permissions:
        return

    # restrict results by ownership / user-permission rules
    match_conditions = self.build_match_conditions()
    if match_conditions:
        self.conditions.append("(" + match_conditions + ")")
def build_filter_conditions(self, filters, conditions, ignore_permissions=None):
    """Translate user filters into SQL condition strings appended to
    *conditions*. Raw string filters pass through untouched; structured
    filters are rendered via `prepare_filter_condition`.

    NOTE: the optional *ignore_permissions* argument toggles the
    instance-wide flag as a side effect (kept for backward compatibility).
    """
    if ignore_permissions is not None:
        self.flags.ignore_permissions = ignore_permissions

    # a single dict filter is treated as a one-element list
    pending = [filters] if isinstance(filters, dict) else filters
    for entry in pending:
        if isinstance(entry, string_types):
            conditions.append(entry)
        else:
            conditions.append(self.prepare_filter_condition(entry))
def prepare_filter_condition(self, f):
    """Returns a filter condition in the format:
        ifnull(`tabDocType`.`fieldname`, fallback) operator "value"
    """
    from frappe.boot import get_additional_filters_from_hooks
    additional_filters_config = get_additional_filters_from_hooks()
    # normalize the raw filter (tuple/dict/...) into a frappe Filter object
    f = get_filter(self.doctype, f, additional_filters_config)

    tname = ('`tab' + f.doctype + '`')
    if not tname in self.tables:
        self.append_table(tname)

    # a fieldname already wrapped in ifnull() is used verbatim
    if 'ifnull(' in f.fieldname:
        column_name = f.fieldname
    else:
        column_name = '{tname}.{fname}'.format(tname=tname,
            fname=f.fieldname)

    can_be_null = True

    # hook-provided custom operators may replace field/value wholesale
    if f.operator.lower() in additional_filters_config:
        f.update(get_additional_filter_field(additional_filters_config, f, f.value))

    # prepare in condition
    if f.operator.lower() in ('ancestors of', 'descendants of', 'not ancestors of', 'not descendants of'):
        values = f.value or ''

        # TODO: handle list and tuple
        # if not isinstance(values, (list, tuple)):
        # 	values = values.split(",")
        ref_doctype = f.doctype

        # for a link field, the tree lookup happens on the linked doctype
        if frappe.get_meta(f.doctype).get_field(f.fieldname) is not None :
            ref_doctype = frappe.get_meta(f.doctype).get_field(f.fieldname).options

        result=[]
        lft, rgt = '', ''
        if f.value:
            lft, rgt = frappe.db.get_value(ref_doctype, f.value, ["lft", "rgt"])

        # Get descendants elements of a DocType with a tree structure
        if f.operator.lower() in ('descendants of', 'not descendants of') :
            result = frappe.get_all(ref_doctype, filters={
                'lft': ['>', lft],
                'rgt': ['<', rgt]
            }, order_by='`lft` ASC')
        else :
            # Get ancestor elements of a DocType with a tree structure
            result = frappe.get_all(ref_doctype, filters={
                'lft': ['<', lft],
                'rgt': ['>', rgt]
            }, order_by='`lft` DESC')

        fallback = "''"
        value = [frappe.db.escape((v.name or '').strip(), percent=False) for v in result]
        if len(value):
            value = "({0})".format(", ".join(value))
        else:
            value = "('')"

        # changing operator to IN as the above code fetches all the parent / child values and convert into tuple
        # which can be directly used with IN operator to query.
        f.operator = 'not in' if f.operator.lower() in ('not ancestors of', 'not descendants of') else 'in'

    elif f.operator.lower() in ('in', 'not in'):
        # a comma-separated string is split into individual values
        values = f.value or ''
        if isinstance(values, frappe.string_types):
            values = values.split(",")

        fallback = "''"
        value = [frappe.db.escape((v or '').strip(), percent=False) for v in values]
        if len(value):
            value = "({0})".format(", ".join(value))
        else:
            value = "('')"
    else:
        # scalar comparison: pick value/fallback formatting from the field's
        # docfield type (if we can resolve one)
        df = frappe.get_meta(f.doctype).get("fields", {"fieldname": f.fieldname})
        df = df[0] if df else None

        if df and df.fieldtype in ("Check", "Float", "Int", "Currency", "Percent"):
            can_be_null = False

        # relative-date operators are rewritten into a Between range
        if f.operator.lower() in ('previous', 'next', 'timespan'):
            date_range = get_date_range(f.operator.lower(), f.value)
            f.operator = "Between"
            f.value = date_range
            fallback = "'0001-01-01 00:00:00'"

        if f.operator in ('>', '<') and (f.fieldname in ('creation', 'modified')):
            value = cstr(f.value)
            fallback = "NULL"

        # NOTE(review): ('between') is a plain string, not a tuple, so this is
        # a substring test - it matches the exact operator 'between' but would
        # also accept any substring of it; confirm before tightening
        elif f.operator.lower() in ('between') and \
            (f.fieldname in ('creation', 'modified') or (df and (df.fieldtype=="Date" or df.fieldtype=="Datetime"))):

            value = get_between_date_filter(f.value, df)
            fallback = "'0001-01-01 00:00:00'"

        elif f.operator.lower() == "is":
            # "is set" / "is not set" become != '' / = '' against the column
            if f.value == 'set':
                f.operator = '!='
            elif f.value == 'not set':
                f.operator = '='

            value = ""
            fallback = "''"
            can_be_null = True

            if 'ifnull' not in column_name:
                column_name = 'ifnull({}, {})'.format(column_name, fallback)

        elif df and df.fieldtype=="Date":
            value = frappe.db.format_date(f.value)
            fallback = "'0001-01-01'"

        elif (df and df.fieldtype=="Datetime") or isinstance(f.value, datetime):
            value = frappe.db.format_datetime(f.value)
            fallback = "'0001-01-01 00:00:00'"

        elif df and df.fieldtype=="Time":
            value = get_time(f.value).strftime("%H:%M:%S.%f")
            fallback = "'00:00:00'"

        elif f.operator.lower() in ("like", "not like") or (isinstance(f.value, string_types) and
            (not df or df.fieldtype not in ["Float", "Int", "Currency", "Percent", "Check"])):
            value = "" if f.value==None else f.value
            fallback = "''"

            if f.operator.lower() in ("like", "not like") and isinstance(value, string_types):
                # because "like" uses backslash (\) for escaping
                value = value.replace("\\", "\\\\").replace("%", "%%")

        elif f.operator == '=' and df and df.fieldtype in ['Link', 'Data']: # TODO: Refactor if possible
            value = f.value or "''"
            fallback = "''"

        elif f.fieldname == 'name':
            value = f.value or "''"
            fallback = "''"

        else:
            # everything else is treated as numeric
            value = flt(f.value)
            fallback = 0

        # escape value (in/not-in values were already escaped above;
        # Between values are pre-formatted SQL fragments)
        if isinstance(value, string_types) and not f.operator.lower() == 'between':
            value = "{0}".format(frappe.db.escape(value, percent=False))

    # wrap the column in ifnull() only when NULLs could otherwise leak
    # rows out of the comparison
    if (self.ignore_ifnull
        or not can_be_null
        or (f.value and f.operator.lower() in ('=', 'like'))
        or 'ifnull(' in column_name.lower()):
        # postgres has no case-insensitive LIKE; use ILIKE instead
        if f.operator.lower() == 'like' and frappe.conf.get('db_type') == 'postgres':
            f.operator = 'ilike'
        condition = '{column_name} {operator} {value}'.format(
            column_name=column_name, operator=f.operator,
            value=value)
    else:
        condition = 'ifnull({column_name}, {fallback}) {operator} {value}'.format(
            column_name=column_name, fallback=fallback, operator=f.operator,
            value=value)

    return condition
def build_match_conditions(self, as_condition=True):
    """add match conditions if applicable

    Returns either a single SQL condition string (as_condition=True) or
    the raw list of match filter dicts (as_condition=False)."""
    self.match_filters = []
    self.match_conditions = []
    only_if_shared = False
    if not self.user:
        self.user = frappe.session.user

    if not self.tables: self.extract_tables()

    meta = frappe.get_meta(self.doctype)
    role_permissions = frappe.permissions.get_role_permissions(meta, user=self.user)
    self.shared = frappe.share.get_shared(self.doctype, self.user)

    # no role read permission, no user permission, not a child table:
    # the user may only see documents explicitly shared with them
    if (not meta.istable and
        not role_permissions.get("read") and
        not self.flags.ignore_permissions and
        not has_any_user_permission_for_doctype(self.doctype, self.user, self.reference_doctype)):
        only_if_shared = True
        if not self.shared:
            frappe.throw(_("No permission to read {0}").format(self.doctype), frappe.PermissionError)
        else:
            self.conditions.append(self.get_share_condition())

    else:
        #if has if_owner permission skip user perm check
        if role_permissions.get("if_owner", {}).get("read"):
            self.match_conditions.append("`tab{0}`.`owner` = {1}".format(self.doctype,
                frappe.db.escape(self.user, percent=False)))
        # add user permission only if role has read perm
        elif role_permissions.get("read"):
            # get user permissions
            user_permissions = frappe.permissions.get_user_permissions(self.user)
            self.add_user_permissions(user_permissions)

    if as_condition:
        conditions = ""
        if self.match_conditions:
            # will turn out like ((blog_post in (..) and blogger in (...)) or (blog_category in (...)))
            conditions = "((" + ") or (".join(self.match_conditions) + "))"

        # extra conditions contributed by permission_query_conditions hooks
        doctype_conditions = self.get_permission_query_conditions()
        if doctype_conditions:
            conditions += (' and ' + doctype_conditions) if conditions else doctype_conditions

        # share is an OR condition, if there is a role permission
        if not only_if_shared and self.shared and conditions:
            conditions = "({conditions}) or ({shared_condition})".format(
                conditions=conditions, shared_condition=self.get_share_condition())

        return conditions
    else:
        return self.match_filters
def get_share_condition(self):
    """Return an SQL fragment restricting rows to documents shared with the user."""
    escaped_names = [frappe.db.escape(name, percent=False) for name in self.shared]
    placeholders = ", ".join(["%s"] * len(escaped_names))
    return """`tab{0}`.name in ({1})""".format(self.doctype, placeholders) % tuple(escaped_names)
def add_user_permissions(self, user_permissions):
    """Convert the user's document-level permissions into SQL match
    conditions (one per link field), ANDed together onto
    `self.match_conditions` and recorded in `self.match_filters`."""
    meta = frappe.get_meta(self.doctype)
    doctype_link_fields = []
    doctype_link_fields = meta.get_link_fields()

    # append current doctype with fieldname as 'name' as first link field
    doctype_link_fields.append(dict(
        options=self.doctype,
        fieldname='name',
    ))

    match_filters = {}
    match_conditions = []
    for df in doctype_link_fields:
        if df.get('ignore_user_permissions'): continue

        user_permission_values = user_permissions.get(df.get('options'), {})

        if user_permission_values:
            docs = []
            if frappe.get_system_settings("apply_strict_user_permissions"):
                # strict mode: an empty link value does NOT bypass the check
                condition = ""
            else:
                # lenient mode: rows with an empty link value are always allowed
                empty_value_condition = "ifnull(`tab{doctype}`.`{fieldname}`, '')=''".format(
                    doctype=self.doctype, fieldname=df.get('fieldname')
                )
                condition = empty_value_condition + " or "

            for permission in user_permission_values:
                # unrestricted permission: always applies
                if not permission.get('applicable_for'):
                    docs.append(permission.get('doc'))

                # append docs based on user permission applicable on reference doctype
                # this is useful when getting list of docs from a link field
                # in this case parent doctype of the link
                # will be the reference doctype
                elif df.get('fieldname') == 'name' and self.reference_doctype:
                    if permission.get('applicable_for') == self.reference_doctype:
                        docs.append(permission.get('doc'))

                elif permission.get('applicable_for') == self.doctype:
                    docs.append(permission.get('doc'))

            if docs:
                condition += "`tab{doctype}`.`{fieldname}` in ({values})".format(
                    doctype=self.doctype,
                    fieldname=df.get('fieldname'),
                    values=", ".join(
                        [(frappe.db.escape(doc, percent=False)) for doc in docs])
                )

                match_conditions.append("({condition})".format(condition=condition))
                match_filters[df.get('options')] = docs

    if match_conditions:
        self.match_conditions.append(" and ".join(match_conditions))

    if match_filters:
        self.match_filters.append(match_filters)
def get_permission_query_conditions(self):
    """Collect extra SQL conditions contributed by `permission_query_conditions`
    hooks for this doctype; returns None when no hook yields a condition."""
    hook_methods = frappe.get_hooks("permission_query_conditions", {}).get(self.doctype, [])
    if not hook_methods:
        return None

    conditions = []
    for method in hook_methods:
        condition = frappe.call(frappe.get_attr(method), self.user)
        if condition:
            conditions.append(condition)

    return " and ".join(conditions) if conditions else None
def run_custom_query(self, query):
    """Execute a caller-supplied SQL query, substituting the legacy
    '%(key)s' placeholder with the `name` column first."""
    normalized = query.replace('%(key)s', '`name`') if '%(key)s' in query else query
    return frappe.db.sql(normalized, as_dict=not self.as_list)
def set_order_by(self, args):
    """Set `args.order_by` - either the caller-supplied order, or a default
    derived from the doctype's meta (sort_field/sort_order), with drafts
    first for submittable doctypes."""
    meta = frappe.get_meta(self.doctype)

    if self.order_by:
        args.order_by = self.order_by
    else:
        args.order_by = ""

        # don't add order by from meta if a mysql group function is used without group by clause
        group_function_without_group_by = (len(self.fields)==1 and
            ( self.fields[0].lower().startswith("count(")
                or self.fields[0].lower().startswith("min(")
                or self.fields[0].lower().startswith("max(")
            ) and not self.group_by)

        if not group_function_without_group_by:
            sort_field = sort_order = None
            if meta.sort_field and ',' in meta.sort_field:
                # multiple sort given in doctype definition
                # Example:
                # `idx desc, modified desc`
                # will covert to
                # `tabItem`.`idx` desc, `tabItem`.`modified` desc
                args.order_by = ', '.join(['`tab{0}`.`{1}` {2}'.format(self.doctype,
                    f.split()[0].strip(), f.split()[1].strip()) for f in meta.sort_field.split(',')])
            else:
                sort_field = meta.sort_field or 'modified'
                sort_order = (meta.sort_field and meta.sort_order) or 'desc'
                args.order_by = "`tab{0}`.`{1}` {2}".format(self.doctype, sort_field or "modified", sort_order or "desc")

            # draft docs always on top
            if meta.is_submittable:
                args.order_by = "`tab{0}`.docstatus asc, {1}".format(self.doctype, args.order_by)
def validate_order_by_and_group_by(self, parameters):
    """Check order by, group by so that atleast one column is selected and does not have subquery"""
    if not parameters:
        return

    _lower = parameters.lower()
    # 'select ... from ...' inside order by / group by means a sub-query
    if 'select' in _lower and ' from ' in _lower:
        frappe.throw(_('Cannot use sub-query in order by'))

    # only identifier-ish characters, quotes, dots and parens are allowed
    if re.compile(r".*[^a-z0-9-_ ,`'\"\.\(\)].*").match(_lower):
        frappe.throw(_('Illegal SQL Query'))

    for field in parameters.split(","):
        # a qualified `tabX`.`col` reference must name a table the query joins
        if "." in field and field.strip().startswith("`tab"):
            tbl = field.strip().split('.')[0]
            if tbl not in self.tables:
                if tbl.startswith('`'):
                    # strip '`tab' prefix and trailing '`' for the error message
                    tbl = tbl[4:-1]
                frappe.throw(_("Please select atleast 1 column from {0} to sort/group").format(tbl))
def add_limit(self):
    """Return the SQL LIMIT/OFFSET clause, or '' when paging is disabled."""
    if not self.limit_page_length:
        return ''
    return 'limit %s offset %s' % (self.limit_page_length, self.limit_start)
def add_comment_count(self, result):
    """Attach a `_comment_count` to every named row, derived from the
    JSON-encoded `_comments` column when that column is present."""
    for row in result:
        if not row.name:
            continue
        row._comment_count = 0
        if "_comments" in row:
            row._comment_count = len(json.loads(row._comments or "[]"))
def update_user_settings(self):
    """Persist the user's list-view settings for this doctype, overlaying
    whatever this query supplied onto the stored settings."""
    # fetch stored settings via the module-level get_user_settings helper
    current = json.loads(get_user_settings(self.doctype))
    if hasattr(self, 'user_settings'):
        current.update(self.user_settings)
    if self.save_user_settings_fields:
        current['fields'] = self.user_settings_fields
    # note: calls the module-level update_user_settings, not this method
    update_user_settings(self.doctype, current)
def get_order_by(doctype, meta):
    """Build the default ORDER BY clause for *doctype* from its meta.

    `meta.sort_field` may hold a single fieldname or a comma-separated
    spec like 'idx desc, modified desc'; submittable doctypes additionally
    sort drafts first (docstatus asc)."""
    if meta.sort_field and ',' in meta.sort_field:
        # 'idx desc, modified desc' -> '`tabItem`.`idx` desc, `tabItem`.`modified` desc'
        clauses = []
        for spec in meta.sort_field.split(','):
            tokens = spec.split()
            clauses.append('`tab{0}`.`{1}` {2}'.format(
                doctype, tokens[0].strip(), tokens[1].strip()))
        order_by = ', '.join(clauses)
    else:
        sort_field = meta.sort_field or 'modified'
        sort_order = (meta.sort_field and meta.sort_order) or 'desc'
        order_by = "`tab{0}`.`{1}` {2}".format(doctype, sort_field or "modified", sort_order or "desc")

    # draft docs always on top
    if meta.is_submittable:
        order_by = "`tab{0}`.docstatus asc, {1}".format(doctype, order_by)

    return order_by
@frappe.whitelist()
def get_list(doctype, *args, **kwargs):
    '''wrapper for DatabaseQuery

    Whitelisted entry point: strips request/permission-related kwargs that
    clients must not control, enforces parent permission for child tables,
    then delegates to DatabaseQuery.execute.'''
    # these kwargs are either request plumbing or privileged switches
    kwargs.pop('cmd', None)
    kwargs.pop('ignore_permissions', None)
    kwargs.pop('data', None)
    kwargs.pop('strict', None)
    kwargs.pop('user', None)

    # If doctype is child table
    if frappe.is_table(doctype):
        # Example frappe.db.get_list('Purchase Receipt Item', {'parent': 'Purchase Receipt'})
        # Here purchase receipt is the parent doctype of the child doctype Purchase Receipt Item
        if not kwargs.get('parent'):
            frappe.flags.error_message = _('Parent is required to get child table data')
            raise frappe.PermissionError(doctype)

        check_parent_permission(kwargs.get('parent'), doctype)
        # 'parent' is consumed here; it must not reach DatabaseQuery
        del kwargs['parent']

    return DatabaseQuery(doctype).execute(None, *args, **kwargs)
def is_parent_only_filter(doctype, filters):
    """Return True when every list-style filter row targets *doctype* itself.

    Side effect preserved from the original implementation: 'Between'
    filter rows get their value expanded into a date-range string in place.
    """
    only_parent_doctype = True

    if isinstance(filters, list):
        for filter_row in filters:
            if doctype not in filter_row:
                only_parent_doctype = False
            if 'Between' in filter_row:
                filter_row[3] = get_between_date_filter(filter_row[3])

    return only_parent_doctype
def has_any_user_permission_for_doctype(doctype, user, applicable_for):
    """Return True if *user* holds at least one user permission on *doctype*
    that is either unrestricted or applicable to *applicable_for*."""
    user_permissions = frappe.permissions.get_user_permissions(user=user)
    return any(
        not perm.applicable_for or perm.applicable_for == applicable_for
        for perm in user_permissions.get(doctype, [])
    )
def get_between_date_filter(value, df=None):
    '''
    return the formatted date as per the given example
    [u'2017-11-01', u'2017-11-03'] => '2017-11-01 00:00:00.000000' AND '2017-11-04 00:00:00.000000'
    '''
    # both bounds default to today when the caller supplies nothing
    from_date = frappe.utils.nowdate()
    to_date = frappe.utils.nowdate()

    if value and isinstance(value, (list, tuple)):
        if len(value) >= 1: from_date = value[0]
        if len(value) >= 2: to_date = value[1]

    # for datetime columns the upper bound is pushed one day forward so the
    # BETWEEN range includes the whole of the end date
    if not df or (df and df.fieldtype == 'Datetime'):
        to_date = add_to_date(to_date, days=1)

    if df and df.fieldtype == 'Datetime':
        data = "'%s' AND '%s'" % (
            frappe.db.format_datetime(from_date),
            frappe.db.format_datetime(to_date))
    else:
        data = "'%s' AND '%s'" % (
            frappe.db.format_date(from_date),
            frappe.db.format_date(to_date))

    return data
def get_additional_filter_field(additional_filters_config, f, value):
    """Resolve the hook-defined replacement field for a custom filter
    operator; when the field declares `query_value` options, the display
    *value* is mapped to its query value."""
    operator_config = additional_filters_config[f.operator.lower()]
    field = frappe._dict(frappe.get_attr(operator_config['get_field'])())
    if field.query_value:
        for raw_option in field.options:
            option = frappe._dict(raw_option)
            if option.value == value:
                field.value = option.query_value
    return field
def get_date_range(operator, value):
    """Translate a 'previous'/'next'/'timespan' filter into a concrete
    date range via `get_timespan_date_range`."""
    if operator == 'timespan':
        # the value already names a timespan understood downstream
        timespan = value
    else:
        span_by_value = {
            '1 week': 'week',
            '1 month': 'month',
            '3 months': 'quarter',
            '6 months': '6 months',
            '1 year': 'year',
        }
        prefix_by_operator = {
            'previous': 'last',
            'next': 'next',
        }
        timespan = prefix_by_operator[operator] + ' ' + span_by_value[value]
    return get_timespan_date_range(timespan)
| 32.788079 | 119 | 0.686865 |
ace9b0fc19b85378e95a42ba947c0ddca3220c5f | 92,132 | py | Python | pybind/slxos/v17r_2_00/bgp_state/route/evpn/l2/ethernet_segment/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/bgp_state/route/evpn/l2/ethernet_segment/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/bgp_state/route/evpn/l2/ethernet_segment/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class ethernet_segment(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-bgp-operational - based on the path /bgp-state/route/evpn/l2/ethernet-segment. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: EVPN L2 Routes type ethernet-segment information
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__prefix','__status','__age','__next_hop','__next_hop_learned_from_peer','__next_hop_metric','__rd','__vrf_label_direction','__local_preference','__med','__origin','__weight','__as_path','__communities','__extended_community','__atomic_aggregate_set','__aggregator','__originator','__cluster_list','__adj_rib_out_count','__admin_distance','__tag','__l3_label','__l2_label','__esi','__rmac','__source_rd','__vrf_label','__esi_value','__ipv4_address','__ipv6_address',)
_yang_name = 'ethernet-segment'
_rest_name = 'ethernet-segment'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__origin = YANGDynClass(base=unicode, is_leaf=True, yang_name="origin", rest_name="origin", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__originator = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="originator", rest_name="originator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
self.__ipv6_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv6-address', is_config=False)
self.__weight = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
self.__atomic_aggregate_set = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="atomic-aggregate-set", rest_name="atomic-aggregate-set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='boolean', is_config=False)
self.__rmac = YANGDynClass(base=unicode, is_leaf=True, yang_name="rmac", rest_name="rmac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__as_path = YANGDynClass(base=unicode, is_leaf=True, yang_name="as-path", rest_name="as-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__admin_distance = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="admin-distance", rest_name="admin-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
self.__prefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-prefix', is_config=False)
self.__tag = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
self.__esi_value = YANGDynClass(base=unicode, is_leaf=True, yang_name="esi-value", rest_name="esi-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__vrf_label = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="vrf-label", rest_name="vrf-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='uint32', is_config=False)
self.__extended_community = YANGDynClass(base=unicode, is_leaf=True, yang_name="extended-community", rest_name="extended-community", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__aggregator = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="aggregator", rest_name="aggregator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
self.__rd = YANGDynClass(base=unicode, is_leaf=True, yang_name="rd", rest_name="rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__local_preference = YANGDynClass(base=unicode, is_leaf=True, yang_name="local-preference", rest_name="local-preference", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__next_hop_metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hop-metric", rest_name="next-hop-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='uint32', is_config=False)
self.__esi = YANGDynClass(base=unicode, is_leaf=True, yang_name="esi", rest_name="esi", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__status = YANGDynClass(base=unicode, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__med = YANGDynClass(base=unicode, is_leaf=True, yang_name="med", rest_name="med", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__l3_label = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="l3-label", rest_name="l3-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
self.__cluster_list = YANGDynClass(base=unicode, is_leaf=True, yang_name="cluster-list", rest_name="cluster-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__ipv4_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ipv4-address", rest_name="ipv4-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
self.__next_hop = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="next-hop", rest_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
self.__next_hop_learned_from_peer = YANGDynClass(base=unicode, is_leaf=True, yang_name="next-hop-learned-from-peer", rest_name="next-hop-learned-from-peer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__communities = YANGDynClass(base=unicode, is_leaf=True, yang_name="communities", rest_name="communities", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__adj_rib_out_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="adj-rib-out-count", rest_name="adj-rib-out-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
self.__age = YANGDynClass(base=unicode, is_leaf=True, yang_name="age", rest_name="age", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__vrf_label_direction = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'out': {'value': 1}, u'in': {'value': 0}},), is_leaf=True, yang_name="vrf-label-direction", rest_name="vrf-label-direction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='direction', is_config=False)
self.__source_rd = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-rd", rest_name="source-rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
self.__l2_label = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="l2-label", rest_name="l2-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'bgp-state', u'route', u'evpn', u'l2', u'ethernet-segment']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'bgp-state', u'route', u'evpn', u'l2', u'ethernet-segment']
def _get_prefix(self):
"""
Getter method for prefix, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/prefix (inet:ipv4-prefix)
YANG Description: Network Prefix
"""
return self.__prefix
def _set_prefix(self, v, load=False):
"""
Setter method for prefix, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/prefix (inet:ipv4-prefix)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix() directly.
YANG Description: Network Prefix
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-prefix', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prefix must be of a type compatible with inet:ipv4-prefix""",
'defined-type': "inet:ipv4-prefix",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-prefix', is_config=False)""",
})
self.__prefix = t
if hasattr(self, '_set'):
self._set()
  def _unset_prefix(self):
    """Reset the prefix leaf to its unset default YANGDynClass instance."""
    self.__prefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-prefix', is_config=False)
def _get_status(self):
"""
Getter method for status, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/status (string)
YANG Description: Status
"""
return self.__status
def _set_status(self, v, load=False):
"""
Setter method for status, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/status (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_status is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_status() directly.
YANG Description: Status
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """status must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
})
self.__status = t
if hasattr(self, '_set'):
self._set()
  def _unset_status(self):
    """Reset the status leaf to its unset default YANGDynClass instance."""
    self.__status = YANGDynClass(base=unicode, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_age(self):
"""
Getter method for age, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/age (string)
YANG Description: Age
"""
return self.__age
def _set_age(self, v, load=False):
"""
Setter method for age, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/age (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_age is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_age() directly.
YANG Description: Age
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="age", rest_name="age", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """age must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="age", rest_name="age", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
})
self.__age = t
if hasattr(self, '_set'):
self._set()
  def _unset_age(self):
    """Reset the age leaf to its unset default YANGDynClass instance."""
    self.__age = YANGDynClass(base=unicode, is_leaf=True, yang_name="age", rest_name="age", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_next_hop(self):
"""
Getter method for next_hop, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/next_hop (inet:ipv4-address)
YANG Description: Next hop address
"""
return self.__next_hop
def _set_next_hop(self, v, load=False):
"""
Setter method for next_hop, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/next_hop (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_hop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_hop() directly.
YANG Description: Next hop address
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="next-hop", rest_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_hop must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="next-hop", rest_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)""",
})
self.__next_hop = t
if hasattr(self, '_set'):
self._set()
  def _unset_next_hop(self):
    """Reset the next-hop leaf to its unset default YANGDynClass instance."""
    self.__next_hop = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="next-hop", rest_name="next-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
def _get_next_hop_learned_from_peer(self):
"""
Getter method for next_hop_learned_from_peer, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/next_hop_learned_from_peer (string)
YANG Description: Next hop address learned from peer
"""
return self.__next_hop_learned_from_peer
def _set_next_hop_learned_from_peer(self, v, load=False):
"""
Setter method for next_hop_learned_from_peer, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/next_hop_learned_from_peer (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_hop_learned_from_peer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_hop_learned_from_peer() directly.
YANG Description: Next hop address learned from peer
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="next-hop-learned-from-peer", rest_name="next-hop-learned-from-peer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_hop_learned_from_peer must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="next-hop-learned-from-peer", rest_name="next-hop-learned-from-peer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
})
self.__next_hop_learned_from_peer = t
if hasattr(self, '_set'):
self._set()
  def _unset_next_hop_learned_from_peer(self):
    """Reset the next-hop-learned-from-peer leaf to its unset default instance."""
    self.__next_hop_learned_from_peer = YANGDynClass(base=unicode, is_leaf=True, yang_name="next-hop-learned-from-peer", rest_name="next-hop-learned-from-peer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_next_hop_metric(self):
"""
Getter method for next_hop_metric, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/next_hop_metric (uint32)
YANG Description: Next hop metric
"""
return self.__next_hop_metric
def _set_next_hop_metric(self, v, load=False):
"""
Setter method for next_hop_metric, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/next_hop_metric (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_hop_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_hop_metric() directly.
YANG Description: Next hop metric
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hop-metric", rest_name="next-hop-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_hop_metric must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hop-metric", rest_name="next-hop-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='uint32', is_config=False)""",
})
self.__next_hop_metric = t
if hasattr(self, '_set'):
self._set()
  def _unset_next_hop_metric(self):
    """Reset the next-hop-metric leaf to its unset default YANGDynClass instance."""
    self.__next_hop_metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hop-metric", rest_name="next-hop-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='uint32', is_config=False)
def _get_rd(self):
"""
Getter method for rd, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/rd (string)
YANG Description: Route distinguisher
"""
return self.__rd
def _set_rd(self, v, load=False):
"""
Setter method for rd, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/rd (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_rd is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rd() directly.
YANG Description: Route distinguisher
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="rd", rest_name="rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rd must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="rd", rest_name="rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
})
self.__rd = t
if hasattr(self, '_set'):
self._set()
  def _unset_rd(self):
    """Reset the rd leaf to its unset default YANGDynClass instance."""
    self.__rd = YANGDynClass(base=unicode, is_leaf=True, yang_name="rd", rest_name="rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_vrf_label_direction(self):
"""
Getter method for vrf_label_direction, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/vrf_label_direction (direction)
YANG Description: VRF label direction
"""
return self.__vrf_label_direction
def _set_vrf_label_direction(self, v, load=False):
"""
Setter method for vrf_label_direction, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/vrf_label_direction (direction)
If this variable is read-only (config: false) in the
source YANG file, then _set_vrf_label_direction is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vrf_label_direction() directly.
YANG Description: VRF label direction
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'out': {'value': 1}, u'in': {'value': 0}},), is_leaf=True, yang_name="vrf-label-direction", rest_name="vrf-label-direction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='direction', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vrf_label_direction must be of a type compatible with direction""",
'defined-type': "brocade-bgp-operational:direction",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'out': {'value': 1}, u'in': {'value': 0}},), is_leaf=True, yang_name="vrf-label-direction", rest_name="vrf-label-direction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='direction', is_config=False)""",
})
self.__vrf_label_direction = t
if hasattr(self, '_set'):
self._set()
  def _unset_vrf_label_direction(self):
    """Reset the vrf-label-direction leaf to its unset default instance."""
    self.__vrf_label_direction = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'out': {'value': 1}, u'in': {'value': 0}},), is_leaf=True, yang_name="vrf-label-direction", rest_name="vrf-label-direction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='direction', is_config=False)
def _get_local_preference(self):
"""
Getter method for local_preference, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/local_preference (string)
YANG Description: Local Preference
"""
return self.__local_preference
def _set_local_preference(self, v, load=False):
"""
Setter method for local_preference, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/local_preference (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_local_preference is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_local_preference() directly.
YANG Description: Local Preference
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="local-preference", rest_name="local-preference", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """local_preference must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="local-preference", rest_name="local-preference", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
})
self.__local_preference = t
if hasattr(self, '_set'):
self._set()
  def _unset_local_preference(self):
    """Reset the local-preference leaf to its unset default instance."""
    self.__local_preference = YANGDynClass(base=unicode, is_leaf=True, yang_name="local-preference", rest_name="local-preference", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_med(self):
"""
Getter method for med, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/med (string)
YANG Description: Multi exit discriminator
"""
return self.__med
def _set_med(self, v, load=False):
"""
Setter method for med, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/med (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_med is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_med() directly.
YANG Description: Multi exit discriminator
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="med", rest_name="med", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """med must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="med", rest_name="med", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
})
self.__med = t
if hasattr(self, '_set'):
self._set()
  def _unset_med(self):
    """Reset the med leaf to its unset default YANGDynClass instance."""
    self.__med = YANGDynClass(base=unicode, is_leaf=True, yang_name="med", rest_name="med", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_origin(self):
"""
Getter method for origin, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/origin (string)
YANG Description: Origin
"""
return self.__origin
def _set_origin(self, v, load=False):
"""
Setter method for origin, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/origin (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_origin is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_origin() directly.
YANG Description: Origin
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="origin", rest_name="origin", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """origin must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="origin", rest_name="origin", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
})
self.__origin = t
if hasattr(self, '_set'):
self._set()
  def _unset_origin(self):
    """Reset the origin leaf to its unset default YANGDynClass instance."""
    self.__origin = YANGDynClass(base=unicode, is_leaf=True, yang_name="origin", rest_name="origin", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_weight(self):
"""
Getter method for weight, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/weight (int32)
YANG Description: weight
"""
return self.__weight
def _set_weight(self, v, load=False):
"""
Setter method for weight, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/weight (int32)
If this variable is read-only (config: false) in the
source YANG file, then _set_weight is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_weight() directly.
YANG Description: weight
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """weight must be of a type compatible with int32""",
'defined-type': "int32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)""",
})
self.__weight = t
if hasattr(self, '_set'):
self._set()
  def _unset_weight(self):
    """Reset the weight leaf to its unset default YANGDynClass instance."""
    self.__weight = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="weight", rest_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
def _get_as_path(self):
"""
Getter method for as_path, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/as_path (string)
YANG Description: As Path
"""
return self.__as_path
def _set_as_path(self, v, load=False):
"""
Setter method for as_path, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/as_path (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_as_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_as_path() directly.
YANG Description: As Path
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="as-path", rest_name="as-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """as_path must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="as-path", rest_name="as-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
})
self.__as_path = t
if hasattr(self, '_set'):
self._set()
  def _unset_as_path(self):
    """Reset the as-path leaf to its unset default YANGDynClass instance."""
    self.__as_path = YANGDynClass(base=unicode, is_leaf=True, yang_name="as-path", rest_name="as-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_communities(self):
"""
Getter method for communities, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/communities (string)
YANG Description: Communities
"""
return self.__communities
def _set_communities(self, v, load=False):
"""
Setter method for communities, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/communities (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_communities is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_communities() directly.
YANG Description: Communities
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="communities", rest_name="communities", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """communities must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="communities", rest_name="communities", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
})
self.__communities = t
if hasattr(self, '_set'):
self._set()
  def _unset_communities(self):
    """Reset the communities leaf to its unset default YANGDynClass instance."""
    self.__communities = YANGDynClass(base=unicode, is_leaf=True, yang_name="communities", rest_name="communities", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_extended_community(self):
"""
Getter method for extended_community, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/extended_community (string)
YANG Description: Communities
"""
return self.__extended_community
def _set_extended_community(self, v, load=False):
"""
Setter method for extended_community, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/extended_community (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_extended_community is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_extended_community() directly.
YANG Description: Communities
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="extended-community", rest_name="extended-community", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """extended_community must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="extended-community", rest_name="extended-community", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
})
self.__extended_community = t
if hasattr(self, '_set'):
self._set()
  def _unset_extended_community(self):
    """Reset the extended-community leaf to its unset default instance."""
    self.__extended_community = YANGDynClass(base=unicode, is_leaf=True, yang_name="extended-community", rest_name="extended-community", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_atomic_aggregate_set(self):
"""
Getter method for atomic_aggregate_set, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/atomic_aggregate_set (boolean)
YANG Description: Atomic aggregate set
"""
return self.__atomic_aggregate_set
def _set_atomic_aggregate_set(self, v, load=False):
  """
  Setter method for atomic_aggregate_set, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/atomic_aggregate_set (boolean)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_atomic_aggregate_set is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_atomic_aggregate_set() directly.
  YANG Description: Atomic aggregate set
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type; an incompatible value
    # fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="atomic-aggregate-set", rest_name="atomic-aggregate-set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='boolean', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """atomic_aggregate_set must be of a type compatible with boolean""",
      'defined-type': "boolean",
      'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="atomic-aggregate-set", rest_name="atomic-aggregate-set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='boolean', is_config=False)""",
    })
  self.__atomic_aggregate_set = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_atomic_aggregate_set(self):
  # Restore atomic_aggregate_set to a fresh (unset) instance of its generated YANG type.
  self.__atomic_aggregate_set = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="atomic-aggregate-set", rest_name="atomic-aggregate-set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='boolean', is_config=False)
def _get_aggregator(self):
"""
Getter method for aggregator, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/aggregator (inet:ipv4-address)
YANG Description: Aggregator
"""
return self.__aggregator
def _set_aggregator(self, v, load=False):
  """
  Setter method for aggregator, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/aggregator (inet:ipv4-address)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_aggregator is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_aggregator() directly.
  YANG Description: Aggregator
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type (dotted-quad pattern check);
    # an incompatible value fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="aggregator", rest_name="aggregator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """aggregator must be of a type compatible with inet:ipv4-address""",
      'defined-type': "inet:ipv4-address",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="aggregator", rest_name="aggregator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)""",
    })
  self.__aggregator = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_aggregator(self):
  # Restore aggregator to a fresh (unset) instance of its generated YANG type.
  self.__aggregator = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="aggregator", rest_name="aggregator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
def _get_originator(self):
"""
Getter method for originator, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/originator (inet:ipv4-address)
YANG Description: Originator Id
"""
return self.__originator
def _set_originator(self, v, load=False):
  """
  Setter method for originator, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/originator (inet:ipv4-address)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_originator is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_originator() directly.
  YANG Description: Originator Id
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type (dotted-quad pattern check);
    # an incompatible value fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="originator", rest_name="originator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """originator must be of a type compatible with inet:ipv4-address""",
      'defined-type': "inet:ipv4-address",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="originator", rest_name="originator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)""",
    })
  self.__originator = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_originator(self):
  # Restore originator to a fresh (unset) instance of its generated YANG type.
  self.__originator = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="originator", rest_name="originator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
def _get_cluster_list(self):
"""
Getter method for cluster_list, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/cluster_list (string)
YANG Description: Cluster list
"""
return self.__cluster_list
def _set_cluster_list(self, v, load=False):
  """
  Setter method for cluster_list, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/cluster_list (string)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_cluster_list is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_cluster_list() directly.
  YANG Description: Cluster list
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type; an incompatible value
    # fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="cluster-list", rest_name="cluster-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """cluster_list must be of a type compatible with string""",
      'defined-type': "string",
      'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="cluster-list", rest_name="cluster-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
    })
  self.__cluster_list = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_cluster_list(self):
  # Restore cluster_list to a fresh (unset) instance of its generated YANG type.
  self.__cluster_list = YANGDynClass(base=unicode, is_leaf=True, yang_name="cluster-list", rest_name="cluster-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_adj_rib_out_count(self):
"""
Getter method for adj_rib_out_count, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/adj_rib_out_count (int32)
YANG Description: Adj RIB out count
"""
return self.__adj_rib_out_count
def _set_adj_rib_out_count(self, v, load=False):
  """
  Setter method for adj_rib_out_count, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/adj_rib_out_count (int32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_adj_rib_out_count is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_adj_rib_out_count() directly.
  YANG Description: Adj RIB out count
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type (int32 range check);
    # an incompatible value fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="adj-rib-out-count", rest_name="adj-rib-out-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """adj_rib_out_count must be of a type compatible with int32""",
      'defined-type': "int32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="adj-rib-out-count", rest_name="adj-rib-out-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)""",
    })
  self.__adj_rib_out_count = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_adj_rib_out_count(self):
  # Restore adj_rib_out_count to a fresh (unset) instance of its generated YANG type.
  self.__adj_rib_out_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="adj-rib-out-count", rest_name="adj-rib-out-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
def _get_admin_distance(self):
"""
Getter method for admin_distance, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/admin_distance (int32)
YANG Description: Admin Distance
"""
return self.__admin_distance
def _set_admin_distance(self, v, load=False):
  """
  Setter method for admin_distance, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/admin_distance (int32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_admin_distance is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_admin_distance() directly.
  YANG Description: Admin Distance
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type (int32 range check);
    # an incompatible value fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="admin-distance", rest_name="admin-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """admin_distance must be of a type compatible with int32""",
      'defined-type': "int32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="admin-distance", rest_name="admin-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)""",
    })
  self.__admin_distance = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_admin_distance(self):
  # Restore admin_distance to a fresh (unset) instance of its generated YANG type.
  self.__admin_distance = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="admin-distance", rest_name="admin-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
def _get_tag(self):
"""
Getter method for tag, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/tag (int32)
YANG Description: Tag
"""
return self.__tag
def _set_tag(self, v, load=False):
  """
  Setter method for tag, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/tag (int32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_tag is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_tag() directly.
  YANG Description: Tag
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type (int32 range check);
    # an incompatible value fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """tag must be of a type compatible with int32""",
      'defined-type': "int32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)""",
    })
  self.__tag = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_tag(self):
  # Restore tag to a fresh (unset) instance of its generated YANG type.
  self.__tag = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
def _get_l3_label(self):
"""
Getter method for l3_label, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/l3_label (int32)
YANG Description: L3 Label
"""
return self.__l3_label
def _set_l3_label(self, v, load=False):
  """
  Setter method for l3_label, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/l3_label (int32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_l3_label is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_l3_label() directly.
  YANG Description: L3 Label
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type (int32 range check);
    # an incompatible value fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="l3-label", rest_name="l3-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """l3_label must be of a type compatible with int32""",
      'defined-type': "int32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="l3-label", rest_name="l3-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)""",
    })
  self.__l3_label = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_l3_label(self):
  # Restore l3_label to a fresh (unset) instance of its generated YANG type.
  self.__l3_label = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="l3-label", rest_name="l3-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
def _get_l2_label(self):
"""
Getter method for l2_label, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/l2_label (int32)
YANG Description: L2 Label
"""
return self.__l2_label
def _set_l2_label(self, v, load=False):
  """
  Setter method for l2_label, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/l2_label (int32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_l2_label is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_l2_label() directly.
  YANG Description: L2 Label
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type (int32 range check);
    # an incompatible value fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="l2-label", rest_name="l2-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """l2_label must be of a type compatible with int32""",
      'defined-type': "int32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="l2-label", rest_name="l2-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)""",
    })
  self.__l2_label = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_l2_label(self):
  # Restore l2_label to a fresh (unset) instance of its generated YANG type.
  self.__l2_label = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), is_leaf=True, yang_name="l2-label", rest_name="l2-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='int32', is_config=False)
def _get_esi(self):
"""
Getter method for esi, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/esi (string)
YANG Description: ESI
"""
return self.__esi
def _set_esi(self, v, load=False):
  """
  Setter method for esi, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/esi (string)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_esi is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_esi() directly.
  YANG Description: ESI
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type; an incompatible value
    # fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="esi", rest_name="esi", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """esi must be of a type compatible with string""",
      'defined-type': "string",
      'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="esi", rest_name="esi", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
    })
  self.__esi = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_esi(self):
  # Restore esi to a fresh (unset) instance of its generated YANG type.
  self.__esi = YANGDynClass(base=unicode, is_leaf=True, yang_name="esi", rest_name="esi", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_rmac(self):
"""
Getter method for rmac, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/rmac (string)
YANG Description: Router Mac
"""
return self.__rmac
def _set_rmac(self, v, load=False):
  """
  Setter method for rmac, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/rmac (string)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_rmac is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_rmac() directly.
  YANG Description: Router Mac
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type; an incompatible value
    # fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="rmac", rest_name="rmac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """rmac must be of a type compatible with string""",
      'defined-type': "string",
      'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="rmac", rest_name="rmac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
    })
  self.__rmac = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_rmac(self):
  # Restore rmac to a fresh (unset) instance of its generated YANG type.
  self.__rmac = YANGDynClass(base=unicode, is_leaf=True, yang_name="rmac", rest_name="rmac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_source_rd(self):
"""
Getter method for source_rd, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/source_rd (string)
YANG Description: Source Route distinguisher
"""
return self.__source_rd
def _set_source_rd(self, v, load=False):
  """
  Setter method for source_rd, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/source_rd (string)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_source_rd is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_source_rd() directly.
  YANG Description: Source Route distinguisher
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type; an incompatible value
    # fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="source-rd", rest_name="source-rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """source_rd must be of a type compatible with string""",
      'defined-type': "string",
      'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="source-rd", rest_name="source-rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
    })
  self.__source_rd = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_source_rd(self):
  # Restore source_rd to a fresh (unset) instance of its generated YANG type.
  self.__source_rd = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-rd", rest_name="source-rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_vrf_label(self):
"""
Getter method for vrf_label, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/vrf_label (uint32)
YANG Description: VRF Label
"""
return self.__vrf_label
def _set_vrf_label(self, v, load=False):
  """
  Setter method for vrf_label, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/vrf_label (uint32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_vrf_label is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_vrf_label() directly.
  YANG Description: VRF Label
  """
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type (a typed list of uint32);
    # an incompatible value fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="vrf-label", rest_name="vrf-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """vrf_label must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="vrf-label", rest_name="vrf-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='uint32', is_config=False)""",
    })
  self.__vrf_label = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_vrf_label(self):
  # Restore vrf_label to a fresh (empty) typed list of its generated YANG type.
  self.__vrf_label = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="vrf-label", rest_name="vrf-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='uint32', is_config=False)
def _get_esi_value(self):
"""
Getter method for esi_value, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/esi_value (string)
YANG Description: ESI value
"""
return self.__esi_value
def _set_esi_value(self, v, load=False):
  """
  Setter method for esi_value, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/esi_value (string)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_esi_value is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_esi_value() directly.
  YANG Description: ESI value
  """
  parent = getattr(self, "_parent", None)
  if parent is not None and load is False:
    # esi_value is a list key: once this entry lives inside a parent list,
    # the key may only be written during a load, never reassigned directly.
    raise AttributeError("Cannot set keys directly when" +
                         " within an instantiated list")
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type; an incompatible value
    # fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="esi-value", rest_name="esi-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """esi_value must be of a type compatible with string""",
      'defined-type': "string",
      'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="esi-value", rest_name="esi-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)""",
    })
  self.__esi_value = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_esi_value(self):
  # Restore esi_value to a fresh (unset) instance of its generated YANG key type.
  self.__esi_value = YANGDynClass(base=unicode, is_leaf=True, yang_name="esi-value", rest_name="esi-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)
def _get_ipv4_address(self):
"""
Getter method for ipv4_address, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/ipv4_address (inet:ipv4-address)
YANG Description: IPv4 address
"""
return self.__ipv4_address
def _set_ipv4_address(self, v, load=False):
  """
  Setter method for ipv4_address, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/ipv4_address (inet:ipv4-address)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_ipv4_address is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_ipv4_address() directly.
  YANG Description: IPv4 address
  """
  parent = getattr(self, "_parent", None)
  if parent is not None and load is False:
    # ipv4_address is a list key: once this entry lives inside a parent list,
    # the key may only be written during a load, never reassigned directly.
    raise AttributeError("Cannot set keys directly when" +
                         " within an instantiated list")
  if hasattr(v, "_utype"):
    # Let wrapped values coerce themselves to their underlying type first.
    v = v._utype(v)
  try:
    # Re-wrap the value in the generated YANG type (dotted-quad pattern check);
    # an incompatible value fails here and is reported via the ValueError below.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ipv4-address", rest_name="ipv4-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """ipv4_address must be of a type compatible with inet:ipv4-address""",
      'defined-type': "inet:ipv4-address",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ipv4-address", rest_name="ipv4-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)""",
    })
  self.__ipv4_address = t
  if hasattr(self, '_set'):
    # Run the object's _set callback, when one is defined.
    self._set()
def _unset_ipv4_address(self):
    # Reset ipv4_address to a fresh (empty) YANGDynClass wrapper, i.e. its
    # unconfigured default state.
    self.__ipv4_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ipv4-address", rest_name="ipv4-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv4-address', is_config=False)
def _get_ipv6_address(self):
    """
    Getter method for ipv6_address, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/ipv6_address (inet:ipv6-address)
    YANG Description: IPv6 address
    """
    # Returns the YANGDynClass wrapper assigned by _set_ipv6_address.
    return self.__ipv6_address
def _set_ipv6_address(self, v, load=False):
    """
    Setter method for ipv6_address, mapped from YANG variable /bgp_state/route/evpn/l2/ethernet_segment/ipv6_address (inet:ipv6-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ipv6_address is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ipv6_address() directly.
    YANG Description: IPv6 address
    """
    # Key leaves may only be assigned while the parent list entry is being
    # loaded (load=True); direct assignment afterwards is rejected.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
        raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")
    # Unwrap values that carry their own pyangbind underlying type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in a YANGDynClass that enforces the inet:ipv6-address
        # regex pattern; raises on incompatible input.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv6-address', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ipv6_address must be of a type compatible with inet:ipv6-address""",
            'defined-type': "inet:ipv6-address",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv6-address', is_config=False)""",
        })
    self.__ipv6_address = t
    # Notify the parent container (if any) that this node changed.
    if hasattr(self, '_set'):
        self._set()
def _unset_ipv6_address(self):
    # Reset ipv6_address to a fresh (empty) YANGDynClass wrapper, i.e. its
    # unconfigured default state.
    self.__ipv6_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='inet:ipv6-address', is_config=False)
# Read-only properties exposing each YANG leaf through its pyangbind getter.
# This node is operational state (config: false), so no public setters are
# attached -- values are populated via the private _set_* methods.
prefix = __builtin__.property(_get_prefix)
status = __builtin__.property(_get_status)
age = __builtin__.property(_get_age)
next_hop = __builtin__.property(_get_next_hop)
next_hop_learned_from_peer = __builtin__.property(_get_next_hop_learned_from_peer)
next_hop_metric = __builtin__.property(_get_next_hop_metric)
rd = __builtin__.property(_get_rd)
vrf_label_direction = __builtin__.property(_get_vrf_label_direction)
local_preference = __builtin__.property(_get_local_preference)
med = __builtin__.property(_get_med)
origin = __builtin__.property(_get_origin)
weight = __builtin__.property(_get_weight)
as_path = __builtin__.property(_get_as_path)
communities = __builtin__.property(_get_communities)
extended_community = __builtin__.property(_get_extended_community)
atomic_aggregate_set = __builtin__.property(_get_atomic_aggregate_set)
aggregator = __builtin__.property(_get_aggregator)
originator = __builtin__.property(_get_originator)
cluster_list = __builtin__.property(_get_cluster_list)
adj_rib_out_count = __builtin__.property(_get_adj_rib_out_count)
admin_distance = __builtin__.property(_get_admin_distance)
tag = __builtin__.property(_get_tag)
l3_label = __builtin__.property(_get_l3_label)
l2_label = __builtin__.property(_get_l2_label)
esi = __builtin__.property(_get_esi)
rmac = __builtin__.property(_get_rmac)
source_rd = __builtin__.property(_get_source_rd)
vrf_label = __builtin__.property(_get_vrf_label)
esi_value = __builtin__.property(_get_esi_value)
ipv4_address = __builtin__.property(_get_ipv4_address)
ipv6_address = __builtin__.property(_get_ipv6_address)
# Registry used by pyangbind to enumerate this container's child elements.
_pyangbind_elements = {'prefix': prefix, 'status': status, 'age': age, 'next_hop': next_hop, 'next_hop_learned_from_peer': next_hop_learned_from_peer, 'next_hop_metric': next_hop_metric, 'rd': rd, 'vrf_label_direction': vrf_label_direction, 'local_preference': local_preference, 'med': med, 'origin': origin, 'weight': weight, 'as_path': as_path, 'communities': communities, 'extended_community': extended_community, 'atomic_aggregate_set': atomic_aggregate_set, 'aggregator': aggregator, 'originator': originator, 'cluster_list': cluster_list, 'adj_rib_out_count': adj_rib_out_count, 'admin_distance': admin_distance, 'tag': tag, 'l3_label': l3_label, 'l2_label': l2_label, 'esi': esi, 'rmac': rmac, 'source_rd': source_rd, 'vrf_label': vrf_label, 'esi_value': esi_value, 'ipv4_address': ipv4_address, 'ipv6_address': ipv6_address, }
| 70.115677 | 836 | 0.72776 |
ace9b1f0e00df2ac75822a1b3daecc7ddedad788 | 6,864 | py | Python | saleor/graphql/webhook/mutations.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
] | 15,337 | 2015-01-12T02:11:52.000Z | 2021-10-05T19:19:29.000Z | saleor/graphql/webhook/mutations.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
] | 7,486 | 2015-02-11T10:52:13.000Z | 2021-10-06T09:37:15.000Z | saleor/graphql/webhook/mutations.py | aminziadna/saleor | 2e78fb5bcf8b83a6278af02551a104cfa555a1fb | [
"CC-BY-4.0"
] | 5,864 | 2015-01-16T14:52:54.000Z | 2021-10-05T23:01:15.000Z | import graphene
from django.core.exceptions import ValidationError
from ...core.permissions import AppPermission
from ...webhook import models
from ...webhook.error_codes import WebhookErrorCode
from ..core.mutations import ModelDeleteMutation, ModelMutation
from ..core.types.common import WebhookError
from .enums import WebhookEventTypeEnum
class WebhookCreateInput(graphene.InputObjectType):
    """GraphQL input type consumed by the WebhookCreate mutation."""

    name = graphene.String(description="The name of the webhook.", required=False)
    target_url = graphene.String(description="The url to receive the payload.")
    events = graphene.List(
        WebhookEventTypeEnum,
        description=("The events that webhook wants to subscribe."),
    )
    # Optional: when omitted, the app is taken from the request context
    # (see WebhookCreate.get_instance).
    app = graphene.ID(
        required=False,
        description="ID of the app to which webhook belongs.",
    )
    is_active = graphene.Boolean(
        description="Determine if webhook will be set active or not.", required=False
    )
    secret_key = graphene.String(
        description="The secret key used to create a hash signature with each payload.",
        required=False,
    )
class WebhookCreate(ModelMutation):
    """Mutation creating a webhook subscription owned by an app.

    Callable either by a staff user with MANAGE_APPS or by an authenticated
    app acting on its own behalf (see ``check_permissions``).
    """

    class Arguments:
        input = WebhookCreateInput(
            description="Fields required to create a webhook.", required=True
        )

    class Meta:
        description = "Creates a new webhook subscription."
        model = models.Webhook
        permissions = (AppPermission.MANAGE_APPS,)
        error_type_class = WebhookError
        error_type_field = "webhook_errors"

    @classmethod
    def clean_input(cls, info, instance, data):
        """Validate that an active app owns the webhook being created.

        Raises ``ValidationError`` when no app can be determined or the
        resolved app is inactive.
        """
        cleaned_data = super().clean_input(info, instance, data)
        app = cleaned_data.get("app")

        # We are not able to check it in `check_permission`.
        # We need to confirm that cleaned_data has app_id or
        # context has assigned app instance
        if not instance.app_id and not app:
            raise ValidationError("Missing token or app", code=WebhookErrorCode.INVALID)

        if instance.app_id:
            # Let's skip app id in case when context has
            # app instance
            app = instance.app
            cleaned_data.pop("app", None)

        if not app or not app.is_active:
            raise ValidationError(
                "App doesn't exist or is disabled",
                code=WebhookErrorCode.NOT_FOUND,
            )
        return cleaned_data

    @classmethod
    def get_instance(cls, info, **data):
        # Pre-assign the requesting app (may be None for staff requests);
        # clean_input then reconciles it with the optional `app` input field.
        instance = super().get_instance(info, **data)
        app = info.context.app
        instance.app = app
        return instance

    @classmethod
    def check_permissions(cls, context):
        # An authenticated app may create its own webhooks even without the
        # MANAGE_APPS permission.
        has_perm = super().check_permissions(context)
        has_perm = bool(context.app) or has_perm
        return has_perm

    @classmethod
    def save(cls, info, instance, cleaned_input):
        instance.save()
        # De-duplicate requested event types before persisting them.
        events = set(cleaned_input.get("events", []))
        models.WebhookEvent.objects.bulk_create(
            [
                models.WebhookEvent(webhook=instance, event_type=event)
                for event in events
            ]
        )
class WebhookUpdateInput(graphene.InputObjectType):
    """GraphQL input type consumed by the WebhookUpdate mutation.

    All fields are optional; omitted fields leave the webhook unchanged.
    """

    name = graphene.String(description="The new name of the webhook.", required=False)
    target_url = graphene.String(
        description="The url to receive the payload.", required=False
    )
    events = graphene.List(
        WebhookEventTypeEnum,
        description=("The events that webhook wants to subscribe."),
        required=False,
    )
    app = graphene.ID(
        required=False,
        description="ID of the app to which webhook belongs.",
    )
    is_active = graphene.Boolean(
        description="Determine if webhook will be set active or not.", required=False
    )
    secret_key = graphene.String(
        description="Use to create a hash signature with each payload.", required=False
    )
class WebhookUpdate(ModelMutation):
    """Mutation updating an existing webhook subscription."""

    class Arguments:
        id = graphene.ID(required=True, description="ID of a webhook to update.")
        input = WebhookUpdateInput(
            description="Fields required to update a webhook.", required=True
        )

    class Meta:
        description = "Updates a webhook subscription."
        model = models.Webhook
        permissions = (AppPermission.MANAGE_APPS,)
        error_type_class = WebhookError
        error_type_field = "webhook_errors"

    @classmethod
    def clean_input(cls, info, instance, data):
        """Validate that an active app owns the webhook being updated."""
        cleaned_data = super().clean_input(info, instance, data)
        app = cleaned_data.get("app")

        if not instance.app_id and not app:
            raise ValidationError("Missing token or app", code=WebhookErrorCode.INVALID)

        if instance.app_id:
            # Let's skip app id in case when context has
            # app instance
            app = instance.app
            cleaned_data.pop("app", None)

        if not app or not app.is_active:
            raise ValidationError(
                "App doesn't exist or is disabled",
                code=WebhookErrorCode.NOT_FOUND,
            )

        return cleaned_data

    @classmethod
    def save(cls, info, instance, cleaned_input):
        instance.save()
        events = set(cleaned_input.get("events", []))
        # Only replace the subscribed events when a non-empty list was given;
        # an omitted/empty `events` input keeps the existing subscriptions.
        if events:
            instance.events.all().delete()
            models.WebhookEvent.objects.bulk_create(
                [
                    models.WebhookEvent(webhook=instance, event_type=event)
                    for event in events
                ]
            )
class WebhookDelete(ModelDeleteMutation):
    """Mutation deleting a webhook subscription.

    An app may only delete webhooks it owns; staff users with MANAGE_APPS
    may delete any webhook.
    """

    class Arguments:
        id = graphene.ID(required=True, description="ID of a webhook to delete.")

    class Meta:
        description = "Deletes a webhook subscription."
        model = models.Webhook
        permissions = (AppPermission.MANAGE_APPS,)
        error_type_class = WebhookError
        error_type_field = "webhook_errors"

    @classmethod
    def check_permissions(cls, context):
        # An authenticated app may delete its own webhooks even without the
        # MANAGE_APPS permission; ownership is verified in perform_mutation.
        has_perm = super().check_permissions(context)
        has_perm = bool(context.app) or has_perm
        return has_perm

    @classmethod
    def perform_mutation(cls, _root, info, **data):
        node_id = data["id"]
        object_id = cls.get_global_id_or_error(node_id)

        app = info.context.app
        if app:
            if not app.is_active:
                raise ValidationError(
                    "App needs to be active to delete webhook",
                    code=WebhookErrorCode.INVALID,
                )
            try:
                # Ownership check: apps may only touch their own webhooks.
                app.webhooks.get(id=object_id)
            except models.Webhook.DoesNotExist:
                raise ValidationError(
                    "Couldn't resolve to a node: %s" % node_id,
                    code=WebhookErrorCode.GRAPHQL_ERROR,
                )

        return super().perform_mutation(_root, info, **data)
| 33.812808 | 88 | 0.628351 |
ace9b2d3e0e7ba165a39f7f79ba47d75fa6c9143 | 1,362 | py | Python | src/king/core/king_animation.py | KingAirC/KingPaint | 32753c48415c3ed1474c5b5c55388ad6e2201600 | [
"MIT"
] | 1 | 2021-06-11T12:35:12.000Z | 2021-06-11T12:35:12.000Z | src/king/core/king_animation.py | KingAirC/KingPaint | 32753c48415c3ed1474c5b5c55388ad6e2201600 | [
"MIT"
] | null | null | null | src/king/core/king_animation.py | KingAirC/KingPaint | 32753c48415c3ed1474c5b5c55388ad6e2201600 | [
"MIT"
] | null | null | null | from matplotlib.animation import FuncAnimation
from .king_figure import KingFigure
from ..alg.basic_alg import linspace
class KingAnimation:
    """
    Drive a matplotlib FuncAnimation over a set of KingGeometry artists.

    Parameter:
    ----------
    artists : must be KingGeometry and Artist's instance list.
    essential_data_generators : A list of function which generates essential_data for each artists,
    accept a parameter represent for frame.
    frames : Iterable or callable.
    interval : Time interval.
    See FuncAnimation.
    """
    def __init__(self, artists, essential_data_generators, frames=linspace(0, 10, 101), interval=50):
        KingFigure().update()
        self.artists = artists
        self.essential_data_generators = essential_data_generators
        self.frames = frames
        self.interval = interval
        self.count = len(artists)
        # Mouse-driven translation would fight the animation, so turn it off
        # on every animated artist.
        for artist in artists:
            artist.mouse_translation = False
        self.ani = FuncAnimation(KingFigure().fig, self.update, frames=frames, interval=interval, blit=True)

    def update(self, frame):
        """Per-frame callback: feed each artist its regenerated essential data."""
        touched = []
        for index in range(self.count):
            fresh_data = self.essential_data_generators[index](frame)
            touched.extend(
                self.artists[index].update_affected(
                    new_essential_data=fresh_data, repaint=False
                )
            )
        KingFigure().update()
        return touched
| 34.923077 | 118 | 0.640235 |
ace9b3900c85a7eb3091a4c045212fbb406bf05d | 43,515 | py | Python | src/sentry/event_manager.py | overquota/sentry | 2cb3a3e40ca0b7ca3308deb0d1d9c436ce8aaeb8 | [
"BSD-3-Clause"
] | 1 | 2019-08-28T11:03:13.000Z | 2019-08-28T11:03:13.000Z | src/sentry/event_manager.py | overquota/sentry | 2cb3a3e40ca0b7ca3308deb0d1d9c436ce8aaeb8 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/event_manager.py | overquota/sentry | 2cb3a3e40ca0b7ca3308deb0d1d9c436ce8aaeb8 | [
"BSD-3-Clause"
] | null | null | null | """
sentry.event_manager
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import jsonschema
import logging
import six
from datetime import datetime, timedelta
from django.conf import settings
from django.db import connection, IntegrityError, router, transaction
from django.db.models import Func
from django.utils import timezone
from django.utils.encoding import force_text
from sentry import buffer, eventtypes, eventstream, features, tagstore, tsdb, filters
from sentry.constants import (
LOG_LEVELS, LOG_LEVELS_MAP, VALID_PLATFORMS, MAX_TAG_VALUE_LENGTH,
)
from sentry.grouping.api import get_grouping_config_dict_for_project, \
get_grouping_config_dict_for_event_data, load_grouping_config, \
apply_server_fingerprinting, get_fingerprinting_config_for_project
from sentry.coreapi import (
APIError,
APIForbidden,
decompress_gzip,
decompress_deflate,
decode_and_decompress_data,
decode_data,
safely_load_json_string,
)
from sentry.interfaces.base import get_interface
from sentry.models import (
Activity, Environment, Event, EventDict, EventError, EventMapping, EventUser, Group,
GroupEnvironment, GroupHash, GroupLink, GroupRelease, GroupResolution, GroupStatus,
Project, Release, ReleaseEnvironment, ReleaseProject,
ReleaseProjectEnvironment, UserReport, Organization,
)
from sentry.plugins import plugins
from sentry.signals import event_discarded, event_saved, first_event_received
from sentry.tasks.integrations import kick_off_status_syncs
from sentry.utils import metrics
from sentry.utils.cache import default_cache
from sentry.utils.canonical import CanonicalKeyDict
from sentry.utils.contexts_normalization import normalize_user_agent
from sentry.utils.data_filters import (
is_valid_ip,
is_valid_release,
is_valid_error_message,
FilterStatKeys,
)
from sentry.utils.dates import to_timestamp
from sentry.utils.db import is_postgres
from sentry.utils.geo import rust_geoip
from sentry.utils.safe import safe_execute, trim, get_path, setdefault_path
from sentry.utils.validators import is_float
from sentry.stacktraces.processing import normalize_stacktraces_for_grouping
from sentry.culprit import generate_culprit
logger = logging.getLogger("sentry.events")

# Timestamps further than this in the future are rejected by
# process_timestamp / normalization.
MAX_SECS_IN_FUTURE = 60
ALLOWED_FUTURE_DELTA = timedelta(seconds=MAX_SECS_IN_FUTURE)
# Oldest accepted event age.
MAX_SECS_IN_PAST = 2592000  # 30 days

# Interface keys treated as browser security reports (consulted by
# should_filter when applying per-project security-report filters).
SECURITY_REPORT_INTERFACES = (
    "csp",
    "hpkp",
    "expectct",
    "expectstaple",
)
def pop_tag(data, key):
    """Remove every tag named *key* from ``data['tags']`` in place.

    ``None`` placeholder entries are preserved.
    """
    remaining = []
    for entry in data['tags']:
        if entry is None or entry[0] != key:
            remaining.append(entry)
    data['tags'] = remaining
def set_tag(data, key, value):
    """Set tag *key* to *value*, replacing any existing entry.

    The value is trimmed to ``MAX_TAG_VALUE_LENGTH`` before being appended.
    """
    pop_tag(data, key)
    trimmed_value = trim(value, MAX_TAG_VALUE_LENGTH)
    data['tags'].append((key, trimmed_value))
def get_tag(data, key):
    """Return the value of the first tag named *key*, or ``None`` if absent."""
    matches = (
        tag_value
        for tag_key, tag_value in get_path(data, 'tags', filter=True)
        if tag_key == key
    )
    return next(matches, None)
def count_limit(count):
    """Return the sampling modulus configured for an occurrence *count*.

    Picks the rate of the first threshold that *count* does not exceed,
    falling back to ``SENTRY_MAX_SAMPLE_RATE``.
    """
    # TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ?
    # ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25)
    return next(
        (rate for threshold, rate in settings.SENTRY_SAMPLE_RATES if count <= threshold),
        settings.SENTRY_MAX_SAMPLE_RATE,
    )
def time_limit(silence):  # ~ 3600 per hour
    """Return the sampling modulus configured for a *silence* duration.

    Picks the rate of the first threshold that *silence* meets or exceeds,
    falling back to ``SENTRY_MAX_SAMPLE_TIME``.
    """
    return next(
        (rate for threshold, rate in settings.SENTRY_SAMPLE_TIMES if silence >= threshold),
        settings.SENTRY_MAX_SAMPLE_TIME,
    )
def parse_client_as_sdk(value):
    """Parse a legacy client identifier into an SDK info dict.

    Accepts "name/version" or "name version"; returns ``{}`` when the
    value is empty or contains neither separator.
    """
    if not value:
        return {}
    for separator in ("/", " "):
        name, found, version = value.partition(separator)
        if found:
            return {"name": name, "version": version}
    return {}
# The sampling predicate is chosen once at import time based on settings.
if not settings.SENTRY_SAMPLE_DATA:
    # Sampling disabled: no event is ever sampled out.
    def should_sample(current_datetime, last_seen, times_seen):
        return False
else:
    def should_sample(current_datetime, last_seen, times_seen):
        # Returns True when this occurrence may be dropped (sampled out).
        # An occurrence is kept whenever either the count-based or the
        # silence-based modulus lines up, so hot groups are stored
        # progressively less often but never silenced entirely.
        silence = current_datetime - last_seen

        if times_seen % count_limit(times_seen) == 0:
            return False

        if times_seen % time_limit(silence) == 0:
            return False

        return True
def plugin_is_regression(group, event):
    """Ask each project plugin whether *event* is a regression of *group*.

    The first plugin returning a non-``None`` verdict wins; when no plugin
    has an opinion the event is treated as a regression.
    """
    project = event.project
    for plugin in plugins.for_project(project):
        verdict = safe_execute(
            plugin.is_regression, group, event, version=1, _with_transaction=False
        )
        if verdict is not None:
            return verdict
    return True
def process_timestamp(value, meta, current_datetime=None):
    """Normalize an event timestamp to a float epoch value.

    Accepts numeric epoch seconds, ISO-8601 strings (trailing "Z" optional),
    or ``datetime`` objects.  On failure an error is recorded on *meta* and
    ``None`` is returned: unparseable input yields ``INVALID_DATA``, values
    more than ``MAX_SECS_IN_FUTURE`` seconds ahead yield ``FUTURE_TIMESTAMP``
    and values older than ``MAX_SECS_IN_PAST`` seconds yield
    ``PAST_TIMESTAMP``.
    """
    original_value = value
    if value is None:
        return None

    if is_float(value):
        try:
            value = datetime.fromtimestamp(float(value))
        except Exception:
            meta.add_error(EventError.INVALID_DATA, original_value)
            return None
    elif isinstance(value, six.string_types):
        # all timestamps are in UTC, but the marker is optional
        if value.endswith('Z'):
            value = value[:-1]
        if '.' in value:
            # Python doesn't support long microsecond values
            # https://github.com/getsentry/sentry/issues/1610
            ts_bits = value.split('.', 1)
            value = '%s.%s' % (ts_bits[0], ts_bits[1][:2])
            fmt = '%Y-%m-%dT%H:%M:%S.%f'
        else:
            fmt = '%Y-%m-%dT%H:%M:%S'
        try:
            value = datetime.strptime(value, fmt)
        except Exception:
            meta.add_error(EventError.INVALID_DATA, original_value)
            return None
    elif not isinstance(value, datetime):
        meta.add_error(EventError.INVALID_DATA, original_value)
        return None

    if current_datetime is None:
        current_datetime = datetime.now()

    if value > current_datetime + ALLOWED_FUTURE_DELTA:
        meta.add_error(EventError.FUTURE_TIMESTAMP, original_value)
        return None

    # Use the shared MAX_SECS_IN_PAST constant (2592000s == 30 days) instead
    # of a hard-coded timedelta(days=30), so this cutoff cannot drift from
    # the module-level limit used elsewhere (e.g. normalization).
    if value < current_datetime - timedelta(seconds=MAX_SECS_IN_PAST):
        meta.add_error(EventError.PAST_TIMESTAMP, original_value)
        return None

    return float(value.strftime('%s'))
def sanitize_fingerprint(value):
    """Coerce one fingerprint entry to text, or ``None`` to drop it.

    Floats are only accepted while they still have an exact integer
    representation in JSON (|value| < 2**53); strings and integers are
    stringified; everything else is silently discarded.
    """
    if isinstance(value, float):
        if abs(value) < (1 << 53):
            return six.text_type(int(value))
        return None

    if isinstance(value, six.string_types + six.integer_types):
        return six.text_type(value)

    return None
def cast_fingerprint(value):
    """Sanitize a fingerprint list, dropping entries that cannot be used.

    Non-list input is returned unchanged so that schema validation can
    report the type error itself.
    """
    if not isinstance(value, list):
        return value

    sanitized = (sanitize_fingerprint(item) for item in value)
    return [item for item in sanitized if item is not None]
def has_pending_commit_resolution(group):
    # True when *group* is linked to a resolving commit that has not yet been
    # associated with any release, i.e. the fix is committed but unreleased.
    # The NOT EXISTS subquery excludes commits already attached to a release.
    return GroupLink.objects.filter(
        group_id=group.id,
        linked_type=GroupLink.LinkedType.commit,
        relationship=GroupLink.Relationship.resolves,
    ).extra(
        where=[
            "NOT EXISTS(SELECT 1 FROM sentry_releasecommit where commit_id = sentry_grouplink.linked_id)"]
    ).exists()
class HashDiscarded(Exception):
    """Signals that an event should be dropped because its hash is discarded.

    NOTE(review): the raising site is outside this chunk; confirm the exact
    semantics against the caller (event save path).
    """
    pass
class ScoreClause(Func):
    """SQL expression computing a group's sort score from times_seen/last_seen.

    Also usable Python-side: coercing an instance to ``int`` delegates to
    ``group.get_score()`` (used by create_or_update-style helpers).
    """

    def __init__(self, group=None, last_seen=None, times_seen=None, *args, **kwargs):
        self.group = group
        self.last_seen = last_seen
        self.times_seen = times_seen
        # times_seen is likely an F-object that needs the value extracted
        if hasattr(self.times_seen, 'rhs'):
            self.times_seen = self.times_seen.rhs.value
        super(ScoreClause, self).__init__(*args, **kwargs)

    def __int__(self):
        # Calculate the score manually when coercing to an int.
        # This is used within create_or_update and friends
        return self.group.get_score() if self.group else 0

    def as_sql(self, compiler, connection, function=None, template=None):
        db = getattr(connection, 'alias', 'default')
        has_values = self.last_seen is not None and self.times_seen is not None
        if is_postgres(db):
            if has_values:
                # Inline the known values so the score is a constant expression.
                sql = 'log(times_seen + %d) * 600 + %d' % (self.times_seen,
                                                           to_timestamp(self.last_seen))
            else:
                # Compute from the row's own columns (Postgres-specific cast).
                sql = 'log(times_seen) * 600 + last_seen::abstime::int'
        else:
            # XXX: if we cant do it atomically let's do it the best we can
            sql = int(self)

        return (sql, [])
def add_meta_errors(errors, meta):
    """Flatten normalization errors recorded on *meta* into *errors*.

    Each emitted dict carries the error payload plus its type, the field
    path (when known), and -- only on a field's first error -- the original
    offending value.
    """
    for field_meta in meta:
        offending_value = field_meta.get().get('val')

        for index, (error_type, error_data) in enumerate(field_meta.iter_errors()):
            entry = dict(error_data)
            entry['type'] = error_type

            if field_meta.path:
                entry['name'] = field_meta.path

            is_first_error = index == 0
            if is_first_error and offending_value is not None:
                entry['value'] = offending_value

            errors.append(entry)
def _decode_event(data, content_encoding):
    """Decode a raw event payload into a ``CanonicalKeyDict``.

    Binary payloads are decompressed according to *content_encoding*;
    binary payloads that do not start with ``{`` are routed through the
    base64/zlib fallback decoder; text is parsed as JSON.
    """
    if isinstance(data, six.binary_type):
        if content_encoding == 'gzip':
            data = decompress_gzip(data)
        elif content_encoding == 'deflate':
            data = decompress_deflate(data)
        elif data[:1] != b'{':
            # Bugfix: use a slice (data[:1]) rather than an index (data[0]).
            # On Python 3, indexing bytes yields an int, so the old
            # `data[0] != b'{'` comparison was always true and plain JSON
            # bytes were needlessly sent through the fallback decoder.
            # Slicing returns bytes on both Python 2 and 3.
            data = decode_and_decompress_data(data)
        else:
            data = decode_data(data)
    if isinstance(data, six.text_type):
        data = safely_load_json_string(data)

    return CanonicalKeyDict(data)
class EventManager(object):
"""
Handles normalization in both the store endpoint and the save task. The
intention is to swap this class out with a reimplementation in Rust.
"""
    def __init__(
        self,
        data,
        version='5',
        project=None,
        grouping_config=None,
        client_ip=None,
        user_agent=None,
        auth=None,
        key=None,
        content_encoding=None,
        is_renormalize=False,
        remove_other=None
    ):
        """Decode the raw payload and capture request context.

        ``data`` may be bytes (optionally compressed per
        ``content_encoding``) or an already-parsed mapping.  When no
        explicit ``grouping_config`` is given it is derived from the
        project.  Normalization itself is deferred to ``normalize()``.
        """
        self._data = _decode_event(data, content_encoding=content_encoding)
        self.version = version
        self._project = project
        # Prefer an explicitly supplied grouping config; otherwise derive it
        # from the project's settings.
        if grouping_config is None and project is not None:
            grouping_config = get_grouping_config_dict_for_project(self._project)
        self._grouping_config = grouping_config
        self._client_ip = client_ip
        self._user_agent = user_agent
        self._auth = auth
        self._key = key
        self._is_renormalize = is_renormalize
        self._remove_other = remove_other
        # Guards against double normalization (see _normalize_impl).
        self._normalized = False
    def process_csp_report(self):
        """Only called from the CSP report endpoint.

        Rewrites ``self._data`` in place: the raw security report (keyed by
        ``interface``/``report``) is converted into a normal event payload
        with message, culprit, tags and a synthetic request interface.
        Raises ``APIForbidden``/``APIError`` on missing or invalid reports.
        """
        data = self._data
        try:
            interface = get_interface(data.pop('interface'))
            report = data.pop('report')
        except KeyError:
            raise APIForbidden('No report or interface data')

        # To support testing, we can either accept a built interface instance, or the raw data in
        # which case we build the instance ourselves
        try:
            instance = (
                report if isinstance(report, interface) else interface.from_raw(report)
            )
        except jsonschema.ValidationError as e:
            raise APIError('Invalid security report: %s' % str(e).splitlines()[0])

        def clean(d):
            # Drop falsy values (e.g. a missing User-Agent/Referer header).
            return dict(filter(lambda x: x[1], d.items()))

        data.update(
            {
                'logger': 'csp',
                'message': instance.get_message(),
                'culprit': instance.get_culprit(),
                instance.path: instance.to_json(),
                'tags': instance.get_tags(),
                'errors': [],
                'user': {'ip_address': self._client_ip},
                # Construct a faux Http interface based on the little information we have
                # This is a bit weird, since we don't have nearly enough
                # information to create an Http interface, but
                # this automatically will pick up tags for the User-Agent
                # which is actually important here for CSP
                'request': {
                    'url': instance.get_origin(),
                    'headers': clean(
                        {
                            'User-Agent': self._user_agent,
                            'Referer': instance.get_referrer(),
                        }
                    ),
                },
            }
        )

        self._data = data
    def normalize(self):
        """Run full normalization once, recording timing and error-count metrics."""
        with metrics.timer('events.store.normalize.duration'):
            self._normalize_impl()

        metrics.timing(
            'events.store.normalize.errors',
            len(self._data.get("errors") or ()),
        )
    def _normalize_impl(self):
        """Normalize ``self._data`` via the Rust (semaphore) StoreNormalizer.

        May only run once per manager; a second call raises ``RuntimeError``.
        """
        if self._normalized:
            raise RuntimeError('Already normalized')
        self._normalized = True

        # Imported lazily so the native extension is only required when
        # normalization actually runs.
        from semaphore.processing import StoreNormalizer
        rust_normalizer = StoreNormalizer(
            geoip_lookup=rust_geoip,
            project_id=self._project.id if self._project else None,
            client_ip=self._client_ip,
            client=self._auth.client if self._auth else None,
            key_id=six.text_type(self._key.id) if self._key else None,
            grouping_config=self._grouping_config,
            protocol_version=six.text_type(self.version) if self.version is not None else None,
            stacktrace_frames_hard_limit=settings.SENTRY_STACKTRACE_FRAMES_HARD_LIMIT,
            max_stacktrace_frames=settings.SENTRY_MAX_STACKTRACE_FRAMES,
            valid_platforms=list(VALID_PLATFORMS),
            max_secs_in_future=MAX_SECS_IN_FUTURE,
            max_secs_in_past=MAX_SECS_IN_PAST,
            enable_trimming=True,
            is_renormalize=self._is_renormalize,
            remove_other=self._remove_other,
        )

        self._data = CanonicalKeyDict(
            rust_normalizer.normalize_event(dict(self._data))
        )

        # User-agent context enrichment still happens Python-side.
        normalize_user_agent(self._data)
    def should_filter(self):
        '''
        returns (result: bool, reason: string or None)
        Result is True if an event should be filtered
        The reason for filtering is passed along as a string
        so that we can store it in metrics
        '''
        # Security reports (CSP/HPKP/Expect-CT/Expect-Staple) have their own
        # per-interface filtering rules.
        for name in SECURITY_REPORT_INTERFACES:
            if name in self._data:
                interface = get_interface(name)
                if interface.to_python(self._data[name]).should_filter(self._project):
                    return (True, FilterStatKeys.INVALID_CSP)

        # Project-level IP and release blacklists.
        if self._client_ip and not is_valid_ip(self._project, self._client_ip):
            return (True, FilterStatKeys.IP_ADDRESS)

        release = self._data.get('release')
        if release and not is_valid_release(self._project, release):
            return (True, FilterStatKeys.RELEASE_VERSION)

        # Message-based filtering: check the log entry first ...
        error_message = get_path(self._data, 'logentry', 'formatted') \
            or get_path(self._data, 'logentry', 'message') \
            or ''
        if error_message and not is_valid_error_message(self._project, error_message):
            return (True, FilterStatKeys.ERROR_MESSAGE)

        # ... then each exception's "type: value" rendering.
        for exc in get_path(self._data, 'exception', 'values', filter=True, default=[]):
            message = u': '.join(
                filter(None, map(exc.get, ['type', 'value']))
            )
            if message and not is_valid_error_message(self._project, message):
                return (True, FilterStatKeys.ERROR_MESSAGE)

        # Finally, run every registered inbound filter that is enabled.
        for filter_cls in filters.all():
            filter_obj = filter_cls(self._project)
            if filter_obj.is_enabled() and filter_obj.test(self._data):
                return (True, six.text_type(filter_obj.id))

        return (False, None)
    def get_data(self):
        """Return the current event payload dict (normalized if ``normalize`` ran)."""
        return self._data
    def _get_event_instance(self, project_id=None):
        """Build an (unsaved) ``Event`` model instance from the payload.

        Also assigns ``data['node_id']`` so the payload can be stored in
        nodestore.  Falls back to ``self._project`` when no *project_id*
        is given.
        """
        data = self._data
        event_id = data.get('event_id')
        platform = data.get('platform')

        # The normalized timestamp is epoch seconds; re-attach UTC tzinfo.
        recorded_timestamp = data.get('timestamp')
        date = datetime.fromtimestamp(recorded_timestamp)
        date = date.replace(tzinfo=timezone.utc)
        time_spent = data.get('time_spent')

        data['node_id'] = Event.generate_node_id(project_id, event_id)

        return Event(
            project_id=project_id or self._project.id,
            event_id=event_id,
            data=EventDict(data, skip_renormalization=True),
            time_spent=time_spent,
            datetime=date,
            platform=platform
        )
def get_culprit(self):
"""Helper to calculate the default culprit"""
return force_text(
self._data.get('culprit') or
self._data.get('transaction') or
generate_culprit(self._data) or
''
)
def get_event_type(self):
"""Returns the event type."""
return eventtypes.get(self._data.get('type', 'default'))()
def materialize_metadata(self):
"""Returns the materialized metadata to be merged with group or
event data. This currently produces the keys `type`, `metadata`,
`title` and `location`. This should most likely also produce
`culprit` here.
"""
event_type = self.get_event_type()
event_metadata = event_type.get_metadata(self._data)
return {
'type': event_type.key,
'metadata': event_metadata,
'title': event_type.get_title(event_metadata),
'location': event_type.get_location(event_metadata),
}
    def get_search_message(self, event_metadata=None, culprit=None):
        """This generates the internal event.message attribute which is used
        for search purposes. It adds a bunch of data from the metadata and
        the culprit.
        """
        if event_metadata is None:
            event_metadata = self.get_event_type().get_metadata(self._data)
        if culprit is None:
            culprit = self.get_culprit()

        data = self._data
        message = ''

        # Start with the log entry text, if any.
        if data.get('logentry'):
            message += (data['logentry'].get('formatted') or
                        data['logentry'].get('message') or '')

        # Append each metadata value not already contained in the message.
        if event_metadata:
            for value in six.itervalues(event_metadata):
                value_u = force_text(value, errors='replace')
                if value_u not in message:
                    message = u'{} {}'.format(message, value_u)

        # Append the culprit last, again avoiding duplicates.
        if culprit and culprit not in message:
            culprit_u = force_text(culprit, errors='replace')
            message = u'{} {}'.format(message, culprit_u)

        return trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)
    def save(self, project_id, raw=False, assume_normalized=False):
        """Normalize, deduplicate, aggregate and persist the current event.

        Returns the saved ``Event`` (or the pre-existing one when a
        duplicate ``event_id`` is found).  Propagates ``HashDiscarded``
        from ``_save_aggregate`` when the event matches a group tombstone.
        """
        # Normalize if needed
        if not self._normalized:
            if not assume_normalized:
                self.normalize()
            self._normalized = True
        data = self._data
        project = Project.objects.get_from_cache(id=project_id)
        project._organization_cache = Organization.objects.get_from_cache(
            id=project.organization_id)
        # Check to make sure we're not about to do a bunch of work that's
        # already been done if we've processed an event with this ID. (This
        # isn't a perfect solution -- this doesn't handle ``EventMapping`` and
        # there's a race condition between here and when the event is actually
        # saved, but it's an improvement. See GH-7677.)
        try:
            event = Event.objects.get(
                project_id=project.id,
                event_id=data['event_id'],
            )
        except Event.DoesNotExist:
            pass
        else:
            # Make sure we cache on the project before returning
            event._project_cache = project
            logger.info(
                'duplicate.found',
                exc_info=True,
                extra={
                    'event_uuid': data['event_id'],
                    'project_id': project.id,
                    'model': Event.__name__,
                }
            )
            return event
        # Pull out the culprit
        culprit = self.get_culprit()
        # Pull the toplevel data we're interested in
        level = data.get('level')
        # TODO(mitsuhiko): this code path should be gone by July 2018.
        # This is going to be fine because no code actually still depends
        # on integers here. When we need an integer it will be converted
        # into one later. Old workers used to send integers here.
        if level is not None and isinstance(level, six.integer_types):
            level = LOG_LEVELS[level]
        transaction_name = data.get('transaction')
        logger_name = data.get('logger')
        release = data.get('release')
        dist = data.get('dist')
        environment = data.get('environment')
        recorded_timestamp = data.get('timestamp')
        # We need to swap out the data with the one internal to the newly
        # created event object
        event = self._get_event_instance(project_id=project_id)
        self._data = data = event.data.data
        event._project_cache = project
        date = event.datetime
        platform = event.platform
        event_id = event.event_id
        if transaction_name:
            transaction_name = force_text(transaction_name)
        # Some of the data that are toplevel attributes are duplicated
        # into tags (logger, level, environment, transaction). These are
        # different from legacy attributes which are normalized into tags
        # ahead of time (site, server_name).
        setdefault_path(data, 'tags', value=[])
        set_tag(data, 'level', level)
        if logger_name:
            set_tag(data, 'logger', logger_name)
        if environment:
            set_tag(data, 'environment', environment)
        if transaction_name:
            set_tag(data, 'transaction', transaction_name)
        if release:
            # dont allow a conflicting 'release' tag
            pop_tag(data, 'release')
            # From here on ``release`` is the Release model instance, not
            # the version string read from the payload above.
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )
            set_tag(data, 'sentry:release', release.version)
        if dist and release:
            dist = release.add_dist(dist, date)
            # dont allow a conflicting 'dist' tag
            pop_tag(data, 'dist')
            set_tag(data, 'sentry:dist', dist.name)
        else:
            dist = None
        event_user = self._get_event_user(project, data)
        if event_user:
            # dont allow a conflicting 'user' tag
            pop_tag(data, 'user')
            set_tag(data, 'sentry:user', event_user.tag_value)
        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        grouping_config = load_grouping_config(
            get_grouping_config_dict_for_event_data(data, project))
        normalize_stacktraces_for_grouping(data, grouping_config)
        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    if get_tag(data, key) is None:
                        set_tag(data, key, value)
        for path, iface in six.iteritems(event.interfaces):
            for k, v in iface.iter_tags():
                set_tag(data, k, v)
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.path, None)
        # The active grouping config was put into the event in the
        # normalize step before. We now also make sure that the
        # fingerprint was set to `'{{ default }}' just in case someone
        # removed it from the payload. The call to get_hashes will then
        # look at `grouping_config` to pick the right paramters.
        data['fingerprint'] = data.get('fingerprint') or ['{{ default }}']
        apply_server_fingerprinting(data, get_fingerprinting_config_for_project(project))
        hashes = event.get_hashes()
        data['hashes'] = hashes
        # we want to freeze not just the metadata and type in but also the
        # derived attributes. The reason for this is that we push this
        # data into kafka for snuba processing and our postprocessing
        # picks up the data right from the snuba topic. For most usage
        # however the data is dynamically overriden by Event.title and
        # Event.location (See Event.as_dict)
        materialized_metadata = self.materialize_metadata()
        event_metadata = materialized_metadata['metadata']
        data.update(materialized_metadata)
        data['culprit'] = culprit
        # index components into ``Event.message``
        # See GH-3248
        event.message = self.get_search_message(event_metadata, culprit)
        received_timestamp = event.data.get('received') or float(event.datetime.strftime('%s'))
        # The group gets the same metadata as the event when it's flushed but
        # additionally the `last_received` key is set. This key is used by
        # _save_aggregate.
        group_metadata = dict(materialized_metadata)
        group_metadata['last_received'] = received_timestamp
        # Attributes shared between the event and its (possibly new) group.
        kwargs = {
            'platform': platform,
            'message': event.message,
            'culprit': culprit,
            'logger': logger_name,
            'level': LOG_LEVELS_MAP.get(level),
            'last_seen': date,
            'first_seen': date,
            'active_at': date,
            'data': group_metadata,
        }
        if release:
            kwargs['first_release'] = release
        try:
            group, is_new, is_regression, is_sample = self._save_aggregate(
                event=event, hashes=hashes, release=release, **kwargs
            )
        except HashDiscarded:
            event_discarded.send_robust(
                project=project,
                sender=EventManager,
            )
            metrics.incr(
                'events.discarded',
                skip_internal=True,
                tags={
                    'organization_id': project.organization_id,
                    'platform': platform,
                },
            )
            raise
        else:
            event_saved.send_robust(
                project=project,
                event_size=event.size,
                sender=EventManager,
            )
        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)
        # When an event was sampled, the canonical source of truth
        # is the EventMapping table since we aren't going to be writing out an actual
        # Event row. Otherwise, if the Event isn't being sampled, we can safely
        # rely on the Event table itself as the source of truth and ignore
        # EventMapping since it's redundant information.
        if is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(EventMapping)):
                    EventMapping.objects.create(project=project, group=group, event_id=event_id)
            except IntegrityError:
                logger.info(
                    'duplicate.found',
                    exc_info=True,
                    extra={
                        'event_uuid': event_id,
                        'project_id': project.id,
                        'group_id': group.id,
                        'model': EventMapping.__name__,
                    }
                )
                return event
        # From here on ``environment`` is the Environment model instance,
        # not the name string read from the payload earlier.
        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )
        group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
            group_id=group.id,
            environment_id=environment.id,
            defaults={
                'first_release': release if release else None,
            },
        )
        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )
            ReleaseProjectEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )
            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )
        # Time-series counters for the group/project (and release if any).
        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]
        if release:
            counters.append((tsdb.models.release, release.id))
        tsdb.incr_multi(counters, timestamp=event.datetime, environment_id=environment.id)
        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]
        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {
                    group.id: {
                        grouprelease.id: 1,
                    },
                })
            )
        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)
        # Attach any pending user reports submitted for this event id.
        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(
            group=group,
            environment=environment,
        )
        # save the event unless its been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                logger.info(
                    'duplicate.found',
                    exc_info=True,
                    extra={
                        'event_uuid': event_id,
                        'project_id': project.id,
                        'group_id': group.id,
                        'model': Event.__name__,
                    }
                )
                return event
            tagstore.delay_index_event_tags(
                organization_id=project.organization_id,
                project_id=project.id,
                group_id=group.id,
                environment_id=environment.id,
                event_id=event.id,
                tags=event.tags,
                date_added=event.datetime,
            )
        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
                    (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
                ),
                timestamp=event.datetime,
                environment_id=environment.id,
            )
        if release:
            if is_new:
                buffer.incr(
                    ReleaseProject, {'new_groups': 1}, {
                        'release_id': release.id,
                        'project_id': project.id,
                    }
                )
            if is_new_group_environment:
                buffer.incr(
                    ReleaseProjectEnvironment, {'new_issues_count': 1}, {
                        'project_id': project.id,
                        'release_id': release.id,
                        'environment_id': environment.id,
                    }
                )
        safe_execute(
            Group.objects.add_tags,
            group,
            environment,
            event.get_tags(),
            _with_transaction=False)
        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send_robust(project=project, group=group, sender=Project)
        eventstream.insert(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=hashes[0],
            # We are choosing to skip consuming the event back
            # in the eventstream if it's flagged as raw.
            # This means that we want to publish the event
            # through the event stream, but we don't care
            # about post processing and handling the commit.
            skip_consume=raw,
        )
        metrics.timing(
            'events.latency',
            received_timestamp - recorded_timestamp,
            tags={
                'project_id': project.id,
            },
        )
        metrics.timing(
            'events.size.data.post_save',
            event.size,
            tags={'project_id': project.id}
        )
        return event
    def _get_event_user(self, project, data):
        """Build (and lazily persist) the ``EventUser`` for this event.

        Returns ``None`` when the payload has no usable user data; the
        hash of the identifying fields is used both for deduplication in
        the database and as a cache key to avoid repeated writes.
        """
        user_data = data.get('user')
        if not user_data:
            return
        euser = EventUser(
            project_id=project.id,
            ident=user_data.get('id'),
            email=user_data.get('email'),
            username=user_data.get('username'),
            ip_address=user_data.get('ip_address'),
            name=user_data.get('name'),
        )
        euser.set_hash()
        if not euser.hash:
            # No identifying fields were present at all.
            return
        cache_key = u'euserid:1:{}:{}'.format(
            project.id,
            euser.hash,
        )
        euser_id = default_cache.get(cache_key)
        if euser_id is None:
            try:
                with transaction.atomic(using=router.db_for_write(EventUser)):
                    euser.save()
            except IntegrityError:
                # Lost the insert race: another process created the same
                # user.  Fetch the existing row and refresh its name.
                try:
                    euser = EventUser.objects.get(
                        project_id=project.id,
                        hash=euser.hash,
                    )
                except EventUser.DoesNotExist:
                    # why???
                    e_userid = -1
                else:
                    if euser.name != (user_data.get('name') or euser.name):
                        euser.update(
                            name=user_data['name'],
                        )
                    e_userid = euser.id
                # NOTE(review): the cache is only populated on the conflict
                # path; a successful save above does not cache the new id.
                default_cache.set(cache_key, e_userid, 3600)
        # NOTE(review): on a cache hit the in-memory (unsaved) instance is
        # returned; callers only read ``tag_value`` from it.
        return euser
def _find_hashes(self, project, hash_list):
return map(
lambda hash: GroupHash.objects.get_or_create(
project=project,
hash=hash,
)[0],
hash_list,
)
    def _save_aggregate(self, event, hashes, release, **kwargs):
        """Find or create the ``Group`` that ``event`` aggregates into.

        Returns a ``(group, is_new, is_regression, is_sample)`` tuple.
        Raises ``HashDiscarded`` when a hash matches a group tombstone.
        """
        project = event.project
        # attempt to find a matching hash
        all_hashes = self._find_hashes(project, hashes)
        existing_group_id = None
        for h in all_hashes:
            if h.group_id is not None:
                existing_group_id = h.group_id
                break
            if h.group_tombstone_id is not None:
                raise HashDiscarded('Matches group tombstone %s' % h.group_tombstone_id)
        # XXX(dcramer): this has the opportunity to create duplicate groups
        # it should be resolved by the hash merging function later but this
        # should be better tested/reviewed
        if existing_group_id is None:
            # it's possible the release was deleted between
            # when we queried for the release and now, so
            # make sure it still exists
            first_release = kwargs.pop('first_release', None)
            with transaction.atomic():
                short_id = project.next_short_id()
                group, group_is_new = Group.objects.create(
                    project=project,
                    short_id=short_id,
                    first_release_id=Release.objects.filter(
                        id=first_release.id,
                    ).values_list('id', flat=True).first() if first_release else None,
                    **kwargs
                ), True
            metrics.incr(
                'group.created',
                skip_internal=True,
                tags={'platform': event.platform or 'unknown'}
            )
        else:
            group = Group.objects.get(id=existing_group_id)
            group_is_new = False
        # If all hashes are brand new we treat this event as new
        is_new = False
        new_hashes = [h for h in all_hashes if h.group_id is None]
        if new_hashes:
            # XXX: There is a race condition here wherein another process could
            # create a new group that is associated with one of the new hashes,
            # add some event(s) to it, and then subsequently have the hash
            # "stolen" by this process. This then "orphans" those events from
            # their "siblings" in the group we've created here. We don't have a
            # way to fix this, since we can't update the group on those hashes
            # without filtering on `group_id` (which we can't do due to query
            # planner weirdness.) For more context, see 84c6f75a and d0e22787,
            # as well as GH-5085.
            GroupHash.objects.filter(
                id__in=[h.id for h in new_hashes],
            ).exclude(
                state=GroupHash.State.LOCKED_IN_MIGRATION,
            ).update(group=group)
            if group_is_new and len(new_hashes) == len(all_hashes):
                is_new = True
        # XXX(dcramer): it's important this gets called **before** the aggregate
        # is processed as otherwise values like last_seen will get mutated
        can_sample = (
            features.has('projects:sample-events', project=project) and should_sample(
                event.data.get('received') or float(event.datetime.strftime('%s')),
                group.data.get('last_received') or float(group.last_seen.strftime('%s')),
                group.times_seen,
            )
        )
        if not is_new:
            # Existing group: bump its aggregate fields and detect regression.
            is_regression = self._process_existing_aggregate(
                group=group,
                event=event,
                data=kwargs,
                release=release,
            )
        else:
            is_regression = False
        # Determine if we've sampled enough data to store this event
        if is_new or is_regression:
            is_sample = False
        else:
            is_sample = can_sample
        if not is_sample:
            GroupHash.record_last_processed_event_id(
                all_hashes[0].id,
                event.event_id,
            )
        return group, is_new, is_regression, is_sample
    def _handle_regression(self, group, event, release):
        """Transition a resolved group back to unresolved when appropriate.

        Returns a truthy value when the group regressed (``None`` on the
        early-exit paths).  The database UPDATE is used as the arbiter so
        that concurrent workers cannot both mark the same regression.
        """
        if not group.is_resolved():
            return
        # we only mark it as a regression if the event's release is newer than
        # the release which we originally marked this as resolved
        elif GroupResolution.has_resolution(group, release):
            return
        elif has_pending_commit_resolution(group):
            return
        if not plugin_is_regression(group, event):
            return
        # we now think its a regression, rely on the database to validate that
        # no one beat us to this
        date = max(event.datetime, group.last_seen)
        is_regression = bool(
            Group.objects.filter(
                id=group.id,
                # ensure we cant update things if the status has been set to
                # ignored
                status__in=[GroupStatus.RESOLVED, GroupStatus.UNRESOLVED],
            ).exclude(
                # add to the regression window to account for races here
                active_at__gte=date - timedelta(seconds=5),
            ).update(
                active_at=date,
                # explicitly set last_seen here as ``is_resolved()`` looks
                # at the value
                last_seen=date,
                status=GroupStatus.UNRESOLVED
            )
        )
        # Keep the in-memory instance in sync with the row we just updated.
        group.active_at = date
        group.status = GroupStatus.UNRESOLVED
        if is_regression and release:
            # resolutions are only valid if the state of the group is still
            # resolved -- if it were to change the resolution should get removed
            try:
                resolution = GroupResolution.objects.get(
                    group=group,
                )
            except GroupResolution.DoesNotExist:
                affected = False
            else:
                cursor = connection.cursor()
                # delete() API does not return affected rows
                cursor.execute("DELETE FROM sentry_groupresolution WHERE id = %s", [resolution.id])
                affected = cursor.rowcount > 0
            if affected:
                # if we had to remove the GroupResolution (i.e. we beat the
                # the queue to handling this) then we need to also record
                # the corresponding event
                try:
                    activity = Activity.objects.filter(
                        group=group,
                        type=Activity.SET_RESOLVED_IN_RELEASE,
                        ident=resolution.id,
                    ).order_by('-datetime')[0]
                except IndexError:
                    # XXX: handle missing data, as its not overly important
                    pass
                else:
                    activity.update(data={
                        'version': release.version,
                    })
        if is_regression:
            activity = Activity.objects.create(
                project=group.project,
                group=group,
                type=Activity.SET_REGRESSION,
                data={
                    'version': release.version if release else '',
                }
            )
            activity.send_notification()
            kick_off_status_syncs.apply_async(kwargs={
                'project_id': group.project_id,
                'group_id': group.id,
            })
        return is_regression
    def _process_existing_aggregate(self, group, event, data, release):
        """Fold this event into an existing group's aggregate fields.

        Updates ``last_seen``/score/metadata (and message, level, culprit
        when they changed) through the write buffer, and runs regression
        detection.  Returns the ``_handle_regression`` result.
        """
        date = max(event.datetime, group.last_seen)
        extra = {
            'last_seen': date,
            'score': ScoreClause(group),
            'data': data['data'],
        }
        if event.message and event.message != group.message:
            extra['message'] = event.message
        if group.level != data['level']:
            extra['level'] = data['level']
        if group.culprit != data['culprit']:
            extra['culprit'] = data['culprit']
        is_regression = self._handle_regression(group, event, release)
        group.last_seen = extra['last_seen']
        update_kwargs = {
            'times_seen': 1,
        }
        # Buffered write: increments times_seen and applies ``extra``.
        buffer.incr(Group, update_kwargs, {
            'id': group.id,
        }, extra)
        return is_regression
| 35.668033 | 106 | 0.580306 |
ace9b39803ec541aabf42aa542727ec85c93190d | 2,946 | py | Python | pong old game/main.py | CuteCatGames/ICHS_PONG | a309e9de05928798860ccecfa2b11ce94997e38e | [
"CC0-1.0"
] | 2 | 2021-03-20T11:26:35.000Z | 2021-04-11T10:29:48.000Z | pong old game/main.py | CuteCatGames/ICHS_PONG | a309e9de05928798860ccecfa2b11ce94997e38e | [
"CC0-1.0"
] | null | null | null | pong old game/main.py | CuteCatGames/ICHS_PONG | a309e9de05928798860ccecfa2b11ce94997e38e | [
"CC0-1.0"
] | null | null | null | import engine
import turtle
import random
print("Welcome to PONG")
# input("Press any key to start [Except the power button]")
player1 = turtle.Turtle()
player2 = turtle.Turtle()
ball = turtle.Turtle()
board = turtle.Screen()
turtle.bgcolor("black")
player1.penup()
player2.penup()
ball.penup()
ball.shape("turtle")
player1.shape("square")
player2.shape("square")
player1.color("white")
player2.color("white")
ball.color("white")
board.setup(600, 600)
player1.goto(-250, 0)
player2.goto(250, 0)
player1.seth(90)
player2.seth(90)
paddle = ((-5, -15), (-5, 15), (5, 15), (5, -15))
turtle.register_shape("paddle", paddle)
player1.shape("paddle")
player2.shape("paddle")
def player1up():
    """Move the left paddle up by 10 units."""
    player1.forward(10)
def player1down():
    """Move the left paddle down by 10 units."""
    player1.backward(10)
def player2up():
    """Move the right paddle up by 10 units."""
    player2.forward(10)
def player2down():
    """Move the right paddle down by 10 units."""
    player2.backward(10)
ball.seth(25)
# Main game loop: advance the ball one unit per frame, bounce it off the
# paddles and the top/bottom walls, and keep the key bindings active.
while True:
    # ball movement
    ball.fd(1)
    if ball.distance(player2) <= 30 or ball.distance(player1) <= 30 or ball.ycor() >= 250 or ball.ycor() <= -250:
        theta = ball.heading()
        # NOTE(review): `mod` is computed below but never used afterwards --
        # it looks like leftover randomised-bounce code; confirm intent.
        randval = random.randint(0, 10)
        posneg = random.getrandbits(1)
        if posneg == 0:
            mod = 90 + randval
        elif posneg == 1:
            mod = 90 - randval
        # Reflect the heading depending on which quadrant it falls in.
        if theta >= 270 or theta <= 90:
            if theta >= 270:
                thetaIncident = theta - 270
                thetaReflection = 180 - thetaIncident
            elif theta <= 90:
                thetaIncident = 90 - theta
                thetaReflection = 180 + thetaIncident
        elif theta > 90 and theta < 270:
            if theta > 90 and theta <= 180:
                thetaIncident = theta - 90
                thetaReflection = 0 + thetaIncident
            elif theta > 180 and theta < 270:
                # NOTE(review): this branch reassigns `theta` but never sets
                # `thetaIncident`, so the next line uses a stale value from a
                # previous bounce (or raises NameError on the first one).
                # Probably intended: thetaIncident = 270 - theta.
                theta = 270 - theta
                thetaReflection = 270 + thetaIncident
        ball.seth(thetaReflection)
    # else:
    #     randval = random.randint(0, 30)
    #     posneg = random.getrandbits(1)
    #     if posneg == 0:
    #         mod = 180 + randval
    #     elif posneg == 1:
    #         mod = 180 - randval
    #     theta = theta + mod
    # NOTE(review): bindings are re-registered on every frame; once before
    # the loop would suffice.
    board.onkeypress(player1up, "w")
    board.onkeypress(player1down, "s")
    board.onkeypress(player2up, "i")
    board.onkeypress(player2down, "k")
    turtle.listen()
    # NOTE(review): exact float equality with ycor() is unlikely to ever be
    # true given the 25-degree heading -- confirm these wall bounces fire.
    if ball.ycor() == -250:
        if ball.heading() >= 180:
            heading = ball.heading()
            heading = heading + 95
            ball.seth(heading)
        if ball.heading() < 180:
            heading = ball.heading()
            heading = heading - 95
            ball.seth(heading)
    if ball.ycor() == 250:
        if ball.heading() >= 0 and ball.heading() < 90:
            heading = ball.heading()
            heading = heading + 95
            ball.seth(heading)
        if ball.heading() < 0 and ball.heading() > 270:
            heading = ball.heading()
            heading = heading - 95
            ball.seth(heading)
| 23.95122 | 113 | 0.561439 |
ace9b3df145fcf60c0ed866d6d63188503d1d581 | 14,102 | py | Python | unsupervised_add_lang.py | carlosep93/LANGSPEC | 8c8f55d999d79628a56f48d4e1a8918f8c426f72 | [
"BSD-3-Clause"
] | null | null | null | unsupervised_add_lang.py | carlosep93/LANGSPEC | 8c8f55d999d79628a56f48d4e1a8918f8c426f72 | [
"BSD-3-Clause"
] | null | null | null | unsupervised_add_lang.py | carlosep93/LANGSPEC | 8c8f55d999d79628a56f48d4e1a8918f8c426f72 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Train a new model on one or across multiple GPUs.
"""
import collections
import itertools
import os
import math
import torch
from fairseq import distributed_utils, options, progress_bar, tasks, utils
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter
def main(args):
    """Top-level training driver.

    Builds the task/model/criterion, loads data and any previous model or
    checkpoint, then trains epoch by epoch until the learning rate drops
    below ``args.min_lr`` or the epoch/update limits are hit.
    """
    if args.max_tokens is None:
        args.max_tokens = 6000
    print(args)
    if not torch.cuda.is_available():
        raise NotImplementedError('Training on CPU is not supported')
    torch.cuda.set_device(args.device_id)
    torch.manual_seed(args.seed)
    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)
    # Load dataset splits
    load_dataset_splits(task, ['train', 'valid'])
    # Build model and criterion
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {}'.format(sum(p.numel() for p in model.parameters())))
    # Make a dummy batch to (i) warm the caching allocator and (ii) as a
    # placeholder DistributedDataParallel when there's an uneven number of
    # batches per worker.
    max_positions = utils.resolve_max_positions(
        task.max_positions(),
        model.max_positions(),
    )
    dummy_batch = task.dataset('train').get_dummy_batch(args.max_tokens, max_positions)
    # Build trainer
    trainer = Trainer(args, task, model, criterion, dummy_batch)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))
    # Initialize dataloader
    epoch_itr = task.get_batch_iterator(
        dataset=task.dataset(args.train_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=max_positions,
        ignore_invalid_inputs=True,
        required_batch_size_multiple=8,
        seed=args.seed,
        num_shards=args.distributed_world_size,
        shard_id=args.distributed_rank,
    )
    #Load partial previous model
    load_previous_model(args,trainer)
    # Load the latest checkpoint if one is available
    if not load_checkpoint(args, trainer, epoch_itr):
        trainer.dummy_train_step([dummy_batch])
    # Train until the learning rate gets too small
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')
    while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
        # train for one epoch
        train(args, trainer, task, epoch_itr)
        if epoch_itr.epoch % args.validate_interval == 0:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        # save checkpoint
        if epoch_itr.epoch % args.save_interval == 0:
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch."""
    # Update parameters every N batches (per-epoch gradient accumulation
    # schedule; the last entry applies to all remaining epochs).
    if epoch_itr.epoch <= len(args.update_freq):
        update_freq = args.update_freq[epoch_itr.epoch - 1]
    else:
        update_freq = args.update_freq[-1]
    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(fix_batches_to_gpus=args.fix_batches_to_gpus)
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(
        args, itr, epoch_itr.epoch, no_progress_bar='simple',
    )
    extra_meters = collections.defaultdict(lambda: AverageMeter())
    first_valid = args.valid_subset.split(',')[0]
    max_update = args.max_update or math.inf
    for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
        log_output = trainer.train_step(samples)
        if log_output is None:
            continue
        # log mid-epoch stats
        stats = get_training_stats(trainer)
        for k, v in log_output.items():
            if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                continue  # these are already logged above
            if 'loss' in k:
                extra_meters[k].update(v, log_output['sample_size'])
            else:
                extra_meters[k].update(v)
            stats[k] = extra_meters[k].avg
        progress.log(stats)
        # ignore the first mini-batch in words-per-second calculation
        if i == 0:
            trainer.get_meter('wps').reset()
        num_updates = trainer.get_num_updates()
        # Optional mid-epoch validation + checkpointing every
        # ``save_interval_updates`` updates.
        if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates > 0:
            valid_losses = validate(args, trainer, task, epoch_itr, [first_valid])
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        if num_updates >= max_update:
            break
    # log end-of-epoch stats
    stats = get_training_stats(trainer)
    for k, meter in extra_meters.items():
        stats[k] = meter.avg
    progress.print(stats)
    # reset training meters
    for k in [
        'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
    ]:
        meter = trainer.get_meter(k)
        if meter is not None:
            meter.reset()
def get_training_stats(trainer):
    """Collect an ordered dict of training metrics from the trainer's meters."""
    stats = collections.OrderedDict()
    stats['loss'] = '{:.3f}'.format(trainer.get_meter('train_loss').avg)
    # Report NLL separately when it was tracked; otherwise derive the
    # perplexity from the overall training loss.
    if trainer.get_meter('train_nll_loss').count > 0:
        nll_loss = trainer.get_meter('train_nll_loss').avg
        stats['nll_loss'] = '{:.3f}'.format(nll_loss)
    else:
        nll_loss = trainer.get_meter('train_loss').avg
    stats['ppl'] = get_perplexity(nll_loss)
    stats['wps'] = round(trainer.get_meter('wps').avg)
    stats['ups'] = '{:.1f}'.format(trainer.get_meter('ups').avg)
    stats['wpb'] = round(trainer.get_meter('wpb').avg)
    stats['bsz'] = round(trainer.get_meter('bsz').avg)
    stats['num_updates'] = trainer.get_num_updates()
    stats['lr'] = trainer.get_lr()
    stats['gnorm'] = '{:.3f}'.format(trainer.get_meter('gnorm').avg)
    stats['clip'] = '{:.0%}'.format(trainer.get_meter('clip').avg)
    stats['oom'] = trainer.get_meter('oom').avg
    # The loss-scale meter only exists for FP16 training.
    if trainer.get_meter('loss_scale') is not None:
        stats['loss_scale'] = '{:.3f}'.format(trainer.get_meter('loss_scale').avg)
    stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
    stats['train_wall'] = round(trainer.get_meter('train_wall').sum)
    return stats
def validate(args, trainer, task, epoch_itr, subsets):
    """Evaluate the model on the validation set(s) and return the losses."""
    valid_losses = []
    for subset in subsets:
        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=task.dataset(subset),
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences_valid,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                trainer.get_model().max_positions(),
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=8,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(
            args, itr, epoch_itr.epoch,
            prefix='valid on \'{}\' subset'.format(subset),
            no_progress_bar='simple'
        )
        # reset validation loss meters
        for k in ['valid_loss', 'valid_nll_loss']:
            meter = trainer.get_meter(k)
            if meter is not None:
                meter.reset()
        extra_meters = collections.defaultdict(lambda: AverageMeter())
        for sample in progress:
            log_output = trainer.valid_step(sample)
            for k, v in log_output.items():
                # These core keys are already accumulated by the trainer's
                # own meters; only track the extras here.
                if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                    continue
                extra_meters[k].update(v)
        # log validation stats
        stats = get_valid_stats(trainer)
        for k, meter in extra_meters.items():
            stats[k] = meter.avg
        progress.print(stats)
        valid_losses.append(stats['valid_loss'])
    return valid_losses
def get_valid_stats(trainer):
    """Collect an ordered dict of validation metrics from the trainer's meters."""
    stats = collections.OrderedDict()
    stats['valid_loss'] = trainer.get_meter('valid_loss').avg
    # Prefer a tracked NLL meter for perplexity; fall back to the loss.
    if trainer.get_meter('valid_nll_loss').count > 0:
        nll_loss = trainer.get_meter('valid_nll_loss').avg
        stats['valid_nll_loss'] = nll_loss
    else:
        nll_loss = trainer.get_meter('valid_loss').avg
    stats['valid_ppl'] = get_perplexity(nll_loss)
    stats['num_updates'] = trainer.get_num_updates()
    # ``save_checkpoint.best`` is set once the first checkpoint is saved.
    if hasattr(save_checkpoint, 'best'):
        stats['best'] = min(save_checkpoint.best, stats['valid_loss'])
    return stats
def get_perplexity(loss):
    """Return 2**loss formatted to two decimals, or ``inf`` on overflow."""
    try:
        ppl = math.pow(2, loss)
    except OverflowError:
        return float('inf')
    return '{:.2f}'.format(ppl)
def save_checkpoint(args, trainer, epoch_itr, val_loss):
    """Save epoch/update/best/last checkpoints as configured.

    Only the distributed master writes; tracks the best validation loss
    on the function object itself (``save_checkpoint.best``).
    """
    if args.no_save or not distributed_utils.is_master(args):
        return
    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()
    # Map each candidate checkpoint filename to whether it should be written.
    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (
        end_of_epoch and not args.no_epoch_checkpoints and
        epoch % args.save_interval == 0
    )
    checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = (
        not end_of_epoch and args.save_interval_updates > 0 and
        updates % args.save_interval_updates == 0
    )
    checkpoint_conds['checkpoint_best.pt'] = (
        val_loss is not None and
        (not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best)
    )
    checkpoint_conds['checkpoint_last.pt'] = True  # keep this last so that it's a symlink
    prev_best = getattr(save_checkpoint, 'best', val_loss)
    if val_loss is not None:
        save_checkpoint.best = min(val_loss, prev_best)
    extra_state = {
        'train_iterator': epoch_itr.state_dict(),
        'val_loss': val_loss,
    }
    if hasattr(save_checkpoint, 'best'):
        extra_state.update({'best': save_checkpoint.best})
    checkpoints = [os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond]
    if len(checkpoints) > 0:
        for cp in checkpoints:
            trainer.save_checkpoint(cp, extra_state)
    if not end_of_epoch and args.keep_interval_updates > 0:
        # remove old checkpoints; checkpoints are sorted in descending order
        checkpoints = utils.checkpoint_paths(args.save_dir, pattern=r'checkpoint_\d+_(\d+)\.pt')
        for old_chk in checkpoints[args.keep_interval_updates:]:
            os.remove(old_chk)
def load_checkpoint(args, trainer, epoch_itr):
    """Load a checkpoint and replay dataloader to match.

    Returns ``True`` when a checkpoint file existed and was loaded,
    ``False`` otherwise.
    """
    os.makedirs(args.save_dir, exist_ok=True)
    checkpoint_path = os.path.join(args.save_dir, args.restore_file)
    if os.path.isfile(checkpoint_path):
        extra_state = trainer.load_checkpoint(checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler,
                                              eval(args.optimizer_overrides))
        if extra_state is not None:
            # replay train iterator to match checkpoint
            epoch_itr.load_state_dict(extra_state['train_iterator'])
            print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(
                checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))
            # Re-apply the learning-rate schedule for the restored position.
            trainer.lr_step(epoch_itr.epoch)
            trainer.lr_step_update(trainer.get_num_updates())
            if 'best' in extra_state:
                save_checkpoint.best = extra_state['best']
        return True
    return False
def load_previous_model(args, trainer):
    """Partially load parameters from a previously trained model.

    Looks for ``args.restore_file`` under ``args.prev_model`` and, when
    present, loads it via ``trainer.load_partial_checkpoint``.  Returns
    ``True`` if the file existed, ``False`` otherwise.
    """
    os.makedirs(args.save_dir, exist_ok=True)
    checkpoint_path = os.path.join(args.prev_model, args.restore_file)
    if os.path.isfile(checkpoint_path):
        # NOTE(review): presumably ``args.key``/``args.newkey``/``args.reuse``
        # select which language-specific modules to copy over -- confirm
        # against Trainer.load_partial_checkpoint.
        trainer.load_partial_checkpoint(checkpoint_path,args.key, args.newkey, args.reuse, args.reset_optimizer, args.reset_lr_scheduler,
                                        eval(args.optimizer_overrides))
        return True
    return False
def load_dataset_splits(task, splits):
    """Load the requested dataset splits into ``task``.

    The 'train' split is loaded with sharding combined; every other split
    is loaded shard by shard ('valid', 'valid1', 'valid2', ...) until a
    shard is missing.  A missing *first* shard is re-raised as an error.
    """
    for split in splits:
        if split == 'train':
            task.load_dataset(split, combine=True)
            continue
        shard = 0
        while True:
            split_name = split if shard == 0 else split + str(shard)
            try:
                task.load_dataset(split_name, combine=False)
            except FileNotFoundError as e:
                if shard > 0:
                    break
                raise e
            shard += 1
if __name__ == '__main__':
    parser = options.get_add_lang_parser()
    args = options.parse_args_and_arch(parser)
    # Dispatch to the appropriate launcher: explicit distributed init,
    # single-node multi-GPU via multiprocessing, or plain single-process.
    if args.distributed_port > 0 or args.distributed_init_method is not None:
        from distributed_train import main as distributed_main
        distributed_main(args)
    elif args.distributed_world_size > 1:
        from multiprocessing_train import main as multiprocessing_main
        multiprocessing_main(args)
    else:
        main(args)
| 37.505319 | 137 | 0.654375 |
ace9b4632528e8a2b8fb2be00304b122b53c503b | 5,462 | py | Python | src/Evaluation/Mesure.py | hanouticelina/search_engine | 2db76ed07d7c5885003bd9a5d7a89450128361fc | [
"MIT"
] | null | null | null | src/Evaluation/Mesure.py | hanouticelina/search_engine | 2db76ed07d7c5885003bd9a5d7a89450128361fc | [
"MIT"
] | null | null | null | src/Evaluation/Mesure.py | hanouticelina/search_engine | 2db76ed07d7c5885003bd9a5d7a89450128361fc | [
"MIT"
] | null | null | null | import numpy as np
from abc import ABC, abstractmethod
import sys
sys.path.append("..")
from Indexation.Index import *
from Indexation.TextRepresenter import *
from Indexation.Parser import *
from Ordonnancement.Weighter import *
from .Query import *
class EvalMesure(ABC):
    """Abstract base class for IR evaluation measures.

    Subclasses implement :meth:`evalQuery` for a single query; the base
    class aggregates the measure over a batch of queries.
    """
    @abstractmethod
    def evalQuery(self, liste, relevants):
        """Compute the measure for one query.

        Args:
            liste: ranked list of documents returned by a model.
            relevants: list of relevant documents for the query.
        """
        pass
    def eval_list_query(self, list_pred, list_relevants):
        """Return (mean, std) of the measure over a batch of queries.

        Bug fix: the original comprehension referenced an undefined name
        ``labels`` instead of the loop variable ``relevants``, raising
        NameError on every call.

        Args:
            list_pred: one ranked result list per query.
            list_relevants: one relevant-document list per query.
        """
        res = [self.evalQuery(pred, relevants)
               for pred, relevants in zip(list_pred, list_relevants)]
        return np.mean(res), np.std(res)
class PrecisionK(EvalMesure):
    """Precision at rank k."""
    def __init__(self, k):
        # Cut-off rank.
        self.k = k
    def evalQuery(self, liste, relevants):
        """Fraction of the top-n predictions found among the first n
        relevant documents, where n = min(k, len(relevants)).

        Returns 1 when the query has no relevant document at all.
        """
        n = min(self.k, len(relevants))
        if len(relevants) == 0:
            # This query has no relevant documents, we return either None or 1 (?)
            return 1
        top_relevant = relevants[:n]
        hits = sum(1 for doc in liste[:n] if doc in top_relevant)
        return hits / n
class RecallK(EvalMesure):
    """Recall at rank k."""
    def __init__(self, k):
        # Cut-off rank.
        self.k = k
    def evalQuery(self, liste, relevants):
        """Fraction of all relevant documents retrieved among the top
        n = min(k, len(relevants)) predictions; 1 when the query has no
        relevant document.
        """
        n = min(self.k, len(relevants))
        if len(relevants) == 0:
            # This query has no relevant documents
            return 1
        top_relevant = relevants[:n]
        hits = sum(1 for doc in liste[:n] if doc in top_relevant)
        return hits / len(relevants)
class FMesureK(EvalMesure):
    """F-measure at rank k, weighting recall by beta."""
    def __init__(self, beta, k):
        # beta weights recall against precision; k is the cut-off rank.
        self.beta = beta
        self.k = k
    def evalQuery(self, liste, relevants):
        """Weighted harmonic mean of precision and recall at rank
        n = min(k, #distinct relevants).

        Returns 1 when there is nothing relevant to find, and 0 when
        nothing relevant was retrieved.
        """
        if len(relevants) == 0:
            # This query has no relevant documents
            return 1
        relevant_set = set(relevants)
        n = min(self.k, len(relevant_set))
        hits = sum(1 for doc in liste[:n] if doc in relevant_set)
        if hits == 0:
            return 0
        precision = hits / n
        recall = hits / len(relevant_set)
        b2 = self.beta ** 2
        return (1 + b2) * precision * recall / (b2 * precision + recall)
class AveragePrecision(EvalMesure):
    """Average precision: mean of precision@k over the ranks at which a
    relevant document is retrieved, normalized by the total number of
    relevant documents.
    """
    def evalQuery(self, liste, relevants):
        """Return the average precision of the ranking ``liste``.

        Bug fix: the original never reset its ``nb_correct`` accumulator
        between ranks (it even divided the accumulator in place) and its
        inner count excluded the current rank, so it did not compute the
        standard average precision.

        Args:
            liste: ranked list of returned documents.
            relevants: relevant documents; 1 is returned when empty.
        """
        if len(relevants) == 0:
            # This query has no relevant documents
            return 1
        relevant_set = set(relevants)
        hits = 0
        total = 0.0
        for k, doc in enumerate(liste):
            if doc in relevant_set:
                hits += 1
                # precision at (1-based) rank k+1
                total += float(hits) / (k + 1)
        return total / len(relevants)
    def evalQueryApprox(self, liste, query):
        # Kept for interface compatibility; not implemented.
        pass
class ReciprocalRank(EvalMesure):
    """Reciprocal rank of the first relevant retrieved document."""
    def evalQuery(self, liste, relevants):
        """Return 1/r where r is the 1-based rank of the first relevant
        document in ``liste``; 0 if none is retrieved, 1 if the query has
        no relevant documents at all.

        Bug fix: the original only inspected liste[0] and then returned
        the inverse of its position inside ``relevants`` instead of
        scanning the ranking, which is not the reciprocal-rank measure.
        """
        if len(relevants) == 0:
            # This query has no relevant documents
            return 1
        for rank, doc in enumerate(liste, start=1):
            if doc in relevants:
                return 1 / rank
        return 0
class NDCG(EvalMesure):
    """Normalized Discounted Cumulative Gain at rank ``rg`` with binary
    relevance (gain 1 for a relevant document, 0 otherwise).
    ------------------------------------------------------
    """
    def __init__(self, rg):
        # Evaluation cut-off rank.
        self.rg = rg
    def iDCG(self):
        """Ideal DCG: the DCG of a ranking whose top ``rg`` documents are
        all relevant (discount 1/log2(k+1) from rank 2 on).
        """
        return 1 + np.sum([1 / np.log2(k + 1) for k in range(1,self.rg)])
    def DCG(self, liste, relevants):
        """Discounted cumulative gain of ``liste`` against ``relevants``.

        NOTE(review): the cut-off here is min(self.rg, len(relevants))
        while iDCG always sums up to self.rg -- confirm the normalization
        is meant to differ when there are fewer relevant documents than
        rg.  Also assumes ``liste`` is non-empty (liste[0] is read).
        ------------------------------------------------------
        """
        rg = min(self.rg, len(relevants))
        relevants = set(relevants)
        sum_ = 1 if liste[0] in relevants else 0
        for k in range(1, rg):
            if liste[k] in relevants:
                sum_ += 1 / np.log2(k + 1)
        return sum_
    def evalQuery(self, liste, relevants):
        """Return DCG/iDCG; 1 when the query has no relevant documents."""
        if len(relevants) == 0:
            # This query has no relevant documents
            return 1
        ndcg = self.DCG(liste, relevants) / self.iDCG()
        return ndcg
| 28.447917 | 113 | 0.490114 |
ace9b5565e4187675815df726ffb6bafaa947cee | 169 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/projects-DS/data_struct_and_algo/phone_num.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/projects-DS/data_struct_and_algo/phone_num.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/projects-DS/data_struct_and_algo/phone_num.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | import phonenumbers
from phonenumbers import geocoder
# Parse an E.164-formatted number and print the English-language region
# description (requires the third-party `phonenumbers` package).
phone_number2 = phonenumbers.parse("+919014705402")
print(geocoder.description_for_number(phone_number2, "en"))
| 21.125 | 59 | 0.828402 |
ace9b78b85763bd5bc46866e988636ffb05a769f | 4,775 | py | Python | src/m1r_console_input_examples.py | csse120/14-Input_WaitUntilEvent_WhileLoops | 6ca54cb704335e3ecd4c08bde741d09f5e36f58d | [
"MIT"
] | null | null | null | src/m1r_console_input_examples.py | csse120/14-Input_WaitUntilEvent_WhileLoops | 6ca54cb704335e3ecd4c08bde741d09f5e36f58d | [
"MIT"
] | null | null | null | src/m1r_console_input_examples.py | csse120/14-Input_WaitUntilEvent_WhileLoops | 6ca54cb704335e3ecd4c08bde741d09f5e36f58d | [
"MIT"
] | null | null | null | """
This module demonstrates how to INPUT from the CONSOLE:
-- ints (integers)
-- floats (floating point numbers)
-- strings.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Derek Whitley, their colleagues,
and PUT_YOUR_NAME_HERE.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# TODO: 2. Read and run this program. Then do the following problems,
# putting your answers RIGHT HERE IN THIS DOCUMENT.
# __
# 1. Write a line of code that would input an INTEGER from the
# console, storing the integer in a variable called 'm'.
# Write your line here:
# __
# 2. Write a line of code that would input a FLOAT from the console,
# storing the float in a variable called 'f'.
# Write your line here:
# __
# 3. Write a line of code that would input an STRING from the console,
# storing the string in a variable called 's'.
# Write your line here:
# __
# 4. What happens if you (the user) enter something OTHER than a
# single integer (e.g., you enter
# five
# or
# 4.5
# or
# 1 1 1
# or
# nothing at all (just press the Enter key)
# -- try them!) when running the input_an_integer example?
# Put your answer here:
# __
# After you have PUT YOUR ANSWERS IN THIS COMMENT as described above,
# a. Find someone who has had THEIR answer checked.
# b. Ask THEM to check YOUR answers to the above.
# c. Change the above _TODO_ to DONE.
# __
# As always, ask questions as needed!
###############################################################################
def main():
    """ Calls the other functions in this module to demo CONSOLE IO. """
    # Run the three demos in order: string, integer, float.
    for demo in (input_a_string, input_an_integer, input_a_float):
        demo()
###############################################################################
# Example: how to INPUT a STRING from the Console.
###############################################################################
def input_a_string():
    """Demo: read a STRING from the console and echo a greeting."""
    print()
    print('--------------------------------------------------')
    print('Demonstrating CONSOLE INPUT of a STRING:')
    print('--------------------------------------------------')

    # input() always returns a string; no conversion is needed.
    name = input('Enter your name: ')

    # Same output as printing the pieces as separate arguments.
    print('Hi, {0}!  {0} !.  {0}'.format(name))
    print('   Sorry, I have the hiccups...')
###############################################################################
# Example: how to INPUT an INTEGER from the Console.
###############################################################################
def input_an_integer():
    """Demo: read an INTEGER from the console and report voting status."""
    print()
    print('--------------------------------------------------')
    print('Demonstrating CONSOLE INPUT of an INTEGER:')
    print('--------------------------------------------------')

    # int() raises ValueError if the typed text is not a single integer.
    age = int(input('How old are you? '))

    print('That is ' + str(age * 12) + ' months!')
    if age < 18:
        print('You will be able to vote in ' + str(18 - age) + ' years.')
    else:
        print('You are old enough to vote, nice!')
###############################################################################
# Example: how to INPUT a FLOAT (floating point number) from the Console
###############################################################################
def input_a_float():
    """Demo: read a FLOAT from the console and convert it to potatoes."""
    print()
    print('--------------------------------------------------')
    print('Demonstrating CONSOLE INPUT of a FLOATING POINT number:')
    print('--------------------------------------------------')

    # float() raises ValueError if the typed text is not a number.
    money = float(input('How much money do you have? '))

    # Price per 10 pounds of potatoes: $6.46 (2015) vs $0.140 (1900).
    potatoes_today = round((money / 6.46) * 10)
    potatoes_1900 = round((money / 0.140) * 10)

    print('According to Infoplease')
    print('at http://www.infoplease.com/ipa/A0873707.html')
    print('    -- That would buy you {} pounds of potatoes in 2015.'.format(potatoes_today))
    print('    -- That would buy you {} pounds of potatoes in 1900!'.format(potatoes_1900))
# -----------------------------------------------------------------------------
# Calls  main  to start the ball rolling.
# (Module-level call: the demos run as soon as this file is executed.)
# -----------------------------------------------------------------------------
main()
| 39.791667 | 79 | 0.442513 |
ace9b81dc44586bfd15031cc4e509932c7ffcbb8 | 4,712 | py | Python | google/ads/googleads/v7/services/types/customer_label_service.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 285 | 2018-10-05T16:47:58.000Z | 2022-03-31T00:58:39.000Z | google/ads/googleads/v7/services/types/customer_label_service.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 425 | 2018-09-10T13:32:41.000Z | 2022-03-31T14:50:05.000Z | google/ads/googleads/v7/services/types/customer_label_service.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 369 | 2018-11-28T07:01:00.000Z | 2022-03-28T09:53:22.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.resources.types import customer_label
from google.rpc import status_pb2 as status # type: ignore
# Generated-code boilerplate: registers this module's messages with
# proto-plus so they marshal against the google.ads.googleads.v7 package.
__protobuf__ = proto.module(
    package="google.ads.googleads.v7.services",
    marshal="google.ads.googleads.v7",
    manifest={
        "GetCustomerLabelRequest",
        "MutateCustomerLabelsRequest",
        "CustomerLabelOperation",
        "MutateCustomerLabelsResponse",
        "MutateCustomerLabelResult",
    },
)
class GetCustomerLabelRequest(proto.Message):
    r"""Request message for
    [CustomerLabelService.GetCustomerLabel][google.ads.googleads.v7.services.CustomerLabelService.GetCustomerLabel].
    Attributes:
        resource_name (str):
            Required. The resource name of the
            customer-label relationship to fetch.
    """
    # Field number matches the underlying .proto definition.
    resource_name = proto.Field(proto.STRING, number=1,)
class MutateCustomerLabelsRequest(proto.Message):
    r"""Request message for
    [CustomerLabelService.MutateCustomerLabels][google.ads.googleads.v7.services.CustomerLabelService.MutateCustomerLabels].
    Attributes:
        customer_id (str):
            Required. ID of the customer whose
            customer-label relationships are being modified.
        operations (Sequence[google.ads.googleads.v7.services.types.CustomerLabelOperation]):
            Required. The list of operations to perform
            on customer-label relationships.
        partial_failure (bool):
            If true, successful operations will be
            carried out and invalid operations will return
            errors. If false, all operations will be carried
            out in one transaction if and only if they are
            all valid. Default is false.
        validate_only (bool):
            If true, the request is validated but not
            executed. Only errors are returned, not results.
    """
    # Field numbers mirror the underlying .proto definition.
    customer_id = proto.Field(proto.STRING, number=1,)
    operations = proto.RepeatedField(
        proto.MESSAGE, number=2, message="CustomerLabelOperation",
    )
    partial_failure = proto.Field(proto.BOOL, number=3,)
    validate_only = proto.Field(proto.BOOL, number=4,)
class CustomerLabelOperation(proto.Message):
    r"""A single operation (create, remove) on a customer-label
    relationship.
    Attributes:
        create (google.ads.googleads.v7.resources.types.CustomerLabel):
            Create operation: No resource name is
            expected for the new customer-label
            relationship.
        remove (str):
            Remove operation: A resource name for the customer-label
            relationship being removed, in this format:
            ``customers/{customer_id}/customerLabels/{label_id}``
    """
    # `create` and `remove` share a protobuf oneof: only one may be set.
    create = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="operation",
        message=customer_label.CustomerLabel,
    )
    remove = proto.Field(proto.STRING, number=2, oneof="operation",)
class MutateCustomerLabelsResponse(proto.Message):
    r"""Response message for a customer labels mutate.
    Attributes:
        partial_failure_error (google.rpc.status_pb2.Status):
            Errors that pertain to operation failures in the partial
            failure mode. Returned only when partial_failure = true and
            all errors occur inside the operations. If any errors occur
            outside the operations (e.g. auth errors), we return an RPC
            level error.
        results (Sequence[google.ads.googleads.v7.services.types.MutateCustomerLabelResult]):
            All results for the mutate.
    """
    # Field numbers mirror the underlying .proto definition.
    partial_failure_error = proto.Field(
        proto.MESSAGE, number=3, message=status.Status,
    )
    results = proto.RepeatedField(
        proto.MESSAGE, number=2, message="MutateCustomerLabelResult",
    )
class MutateCustomerLabelResult(proto.Message):
    r"""The result for a customer label mutate.
    Attributes:
        resource_name (str):
            Returned for successful operations.
    """
    # Field number matches the underlying .proto definition.
    resource_name = proto.Field(proto.STRING, number=1,)
# Public API of this module: exactly the messages registered above.
__all__ = tuple(sorted(__protobuf__.manifest))
| 34.903704 | 124 | 0.687394 |
ace9b86a184f25461e7e8785656bd125aa50b712 | 543 | py | Python | python/books/hard_way/p15/ex17.py | ShenJinXiang/example | 9d3bdf73079092791d3f96d73573ee51d66774ab | [
"MIT"
] | null | null | null | python/books/hard_way/p15/ex17.py | ShenJinXiang/example | 9d3bdf73079092791d3f96d73573ee51d66774ab | [
"MIT"
] | null | null | null | python/books/hard_way/p15/ex17.py | ShenJinXiang/example | 9d3bdf73079092791d3f96d73573ee51d66774ab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from sys import argv
from os.path import exists
# Usage: ex17.py <from_file> <to_file> -- copies one file to another.
script, from_file, to_file = argv
print(f"Copying from {from_file} to {to_file}")
# Fix: use `with` so the file handles are closed even if an error occurs
# (the original left both files open until the very end, and leaked them
# on any exception in between).  Console output is unchanged.
with open(from_file) as in_file:
    indata = in_file.read()
print(f"The input file is {len(indata)} bytes long")
print(f"Does the output file exist? {exists(to_file)}")
print(f"Ready, hit RETURN to continue, CTRL-C to abort.")
input()
with open(to_file, 'w') as out_file:
    out_file.write(indata)
print("Alright, all done.")
ace9b9247b0dcce21a4f4e8b86555e0def8a4f3f | 46,555 | py | Python | rangeset/test/test_rangeset.py | baishancloud/dkit | 63f354886f8dd5059d557719be12c963952aa3d0 | [
"MIT"
] | 13 | 2016-12-16T09:23:09.000Z | 2018-03-10T08:04:00.000Z | rangeset/test/test_rangeset.py | npc9001/pykit | 63f354886f8dd5059d557719be12c963952aa3d0 | [
"MIT"
] | 74 | 2017-03-23T11:36:22.000Z | 2018-04-02T06:19:09.000Z | rangeset/test/test_rangeset.py | drmingdrmer/pykit | e25a71146e81aaf79625cf8d4f4c439ccd515b82 | [
"MIT"
] | 5 | 2016-12-27T07:30:47.000Z | 2018-03-10T07:06:21.000Z | #!/usr/bin/env python2
# coding: utf-8
import logging
import unittest
from pykit import rangeset
from pykit import ututil
dd = ututil.dd
logger = logging.getLogger(__name__)
class TestRange(unittest.TestCase):
    """Unit tests for rangeset.Range / rangeset.ValueRange.

    Ranges are [left, right) intervals; None means unbounded on that side.
    """
    def test_init(self):
        """Construction accepts left < right and the empty left == right."""
        a = rangeset.Range(1, 2)
        dd(a)
        a = rangeset.Range(1, 1)
        dd(a)
    def test_has(self):
        """Range.has(): left-closed / right-open membership, for both
        numeric and string boundaries."""
        cases = (
            ([None, None], 0, True),
            ([None, None], 1, True),
            ([None, 1], 0, True),
            ([None, 1], 1, False),
            ([None, 1], 2, False),
            ([1, None], 0, False),
            ([1, None], 1, True),
            ([1, None], 2, True),
            ([1, 1], 1, False),
            ([1, 3], 0, False),
            ([1, 3], 1, True),
            ([1, 3], 2, True),
            ([1, 3], 3, False),
            ([1, 3], 4, False),
            ([None, None], '', True),
            (['', None], '', True),
            ([None, ''], '', False),
            ([None, 'a'], '', True),
            (['', 'a'], '', True),
            (['', 'a'], 'a', False),
            (['', 'a'], 'b', False),
            (['a', 'b'], 'a', True),
            (['a', 'b'], 'b', False),
            (['a', 'b'], 'c', False),
        )
        dd()
        for rng, val, expected in cases:
            dd('case:', rng, val, expected)
            rng = rangeset.Range(*rng)
            rst = rng.has(val)
            self.assertEqual(expected, rst)
    def test_is_adjacent(self):
        """Two ranges are adjacent iff the first ends exactly where the
        second begins."""
        cases = (
            ([None, None], [None, None], False),
            ([None, 0], [0, None], True),
            ([None, 1], [1, None], True),
            ([None, 1], [2, None], False),
            ([None, 1], [None, 2], False),
            ([1, None], [1, None], False),
            ([0, 1], [1, 3], True),
            ([1, 1], [1, 1], True),
            ([0, 1], [2, 3], False),
            (['', 'a'], ['a', 'b'], True),
            (['', 'a'], ['aa', 'b'], False),
            (['', 'c'], ['a', 'b'], False),
            (['', None], ['a', 'b'], False),
            (['', 'c'], [None, 'b'], False),
        )
        for a, b, expected in cases:
            dd('case:', a, b, expected)
            a = rangeset.Range(*a)
            b = rangeset.Range(*b)
            rst = a.is_adjacent(b)
            dd('rst:', rst)
            self.assertEqual(expected, rst)
    def test_cmp(self):
        """cmp() returns 0 for touching/overlapping ranges, -1/1 for
        strictly smaller/greater ones."""
        cases = (
            ([None, None], [None, None], 0),
            ([None, 1], [1, None], 0),
            ([None, 1], [1, 2], 0),
            ([None, 1], [2, 3], -1),
            ([1, None], [None, 1], 0),
            ([1, None], [None, 0], 1),
            ([1, None], [-1, 0], 1),
            ([0, 1], [1, None], 0),
            ([0, 2], [1, None], 0),
            ([-1, 0], [1, None], -1),
        )
        for a, b, expected in cases:
            dd('case:', a, b, expected)
            a = rangeset.Range(*a)
            b = rangeset.Range(*b)
            rst = a.cmp(b)
            dd('rst:', rst)
            self.assertEqual(expected, rst)
    def test_substract(self):
        """a - b yields a pair [left_remainder, right_remainder]; both the
        module function and the method are exercised."""
        cases = (
            ([None, None], [None, None], [None, None]),
            ([None, None], [1, None], [[None, 1], None]),
            ([None, None], [None, 1], [None, [1, None]]),
            ([None, None], [1, 3], [[None, 1], [3, None]]),
            ([None, 5], [5, 8], [[None, 5], None]),
            ([None, 5], [4, 8], [[None, 4], None]),
            ([None, 5], [1, 2], [[None, 1], [2, 5]]),
            ([None, 5], [None, 2], [None, [2, 5]]),
            ([None, 5], [None, 5], [None, None]),
            ([5, None], [1, 2], [None, [5, None]]),
            ([5, None], [1, 8], [None, [8, None]]),
            ([5, None], [5, 8], [None, [8, None]]),
            ([5, None], [6, 8], [[5, 6], [8, None]]),
            ([5, None], [6, None], [[5, 6], None]),
            ([5, None], [5, None], [None, None]),
            ([5, None], [4, None], [None, None]),
            ([5, 10], [5, None], [None, None]),
            ([5, 10], [6, None], [[5, 6], None]),
            ([5, 10], [6, 7], [[5, 6], [7, 10]]),
            ([5, 10], [6, 10], [[5, 6], None]),
        )
        for a, b, expected in cases:
            dd('case:', a, b, expected)
            a = rangeset.Range(*a)
            b = rangeset.Range(*b)
            # test module method
            rst = rangeset.substract_range(a, b)
            dd('rst:', rst)
            self.assertEqual(expected, rst)
            # test class method
            rst = a.substract(b)
            dd('rst:', rst)
            self.assertEqual(expected, rst)
    def test_intersect(self):
        """intersect() / & / reflected & for Range and ValueRange; a
        ValueRange carries its value into the intersection result."""
        cases = (
            ([None, None], [None, None], [None, None] ),
            ([None, None], [1, None], [1, None] ),
            ([None, None], [None, 1], [None, 1] ),
            ([None, None], [1, 3], [1, 3] ),
            ([None, 5], [5, 8], None ),
            ([None, 5], [4, 8], [4, 5] ),
            ([None, 5], [1, 2], [1, 2] ),
            ([None, 5], [None, 2], [None, 2] ),
            ([None, 5], [None, 5], [None, 5] ),
            ([5, None], [1, 2], None ),
            ([5, None], [1, 8], [5, 8] ),
            ([5, None], [5, 8], [5, 8] ),
            ([5, None], [6, 8], [6, 8] ),
            ([5, None], [6, None], [6, None] ),
            ([5, None], [5, None], [5, None] ),
            ([5, None], [4, None], [5, None] ),
            ([5, 10], [5, None], [5, 10] ),
            ([5, 10], [6, None], [6, 10] ),
            ([5, 10], [6, 7], [6, 7] ),
            ([5, 10], [6, 10], [6, 10] ),
        )
        for a, b, expected in cases:
            dd('case:', a, b, expected)
            a = rangeset.Range(*a)
            self.assertEqual(expected, a.intersect(rangeset.Range(*b)))
            self.assertEqual(expected, a.intersect(rangeset.ValueRange(*(b+['bar']))))
            # __and__
            self.assertEqual(expected, a & b)
            self.assertEqual(expected, a & (b+['bar']))
            # __rand__
            self.assertEqual(expected, b & a)
            self.assertEqual(expected, (b+['bar']) & a)
            a = rangeset.ValueRange(*(a+['ahh']))
            if expected is not None:
                expected = expected + ['ahh']
            self.assertEqual(expected, a.intersect(rangeset.Range(*b)))
            self.assertEqual(expected, a.intersect(rangeset.ValueRange(*(b+['bar']))))
            # __and__
            self.assertEqual(expected, a & b)
            self.assertEqual(expected, a & (b+['bar']))
            # __rand__
            self.assertEqual(expected, b & a)
            self.assertEqual(expected, (b+['bar']) & a)
    def test_length(self):
        """length(): inf for any unbounded side; numeric width for number
        ranges; string ranges map each char into base 257."""
        inf = float('inf')
        cases = (
            ([None, None], inf),
            ([1, None], inf),
            ([None, 1], inf),
            ([None, ''], inf),
            ([None, ()], inf),
            ([None, []], inf),
            ([1, 1], 0),
            ([1, 2], 1),
            ([1.0, 2.2], 1.2),
            (['', '\0'], 1.0/257),
            (['', '\xff'], 256/257.0),
            (['\0', '\0'], 0),
            (['\0', '\1'], 1.0/257.0),
            (['\0', '\xff'], 255.0/257.0),
            (['ab', 'ab'], 0),
            (['ab'*1024, 'ab'*1024], 0),
            (['ab', 'abc'], (0x63 + 1) / (257.0**3)),
            (['abb', 'abc'], 1 / 257.0 ** 3),
            (['', '\xff' * 20], 1), # just like that 0.99999... == 1
        )
        for rng, expected in cases:
            dd('case:', rng, expected)
            rst = rangeset.Range(*rng).length()
            dd('rst:', rst)
            self.assertAlmostEqual(expected, rst)
    def test_val(self):
        """ValueRange.val() returns the third constructor argument as-is."""
        cases = (
            ([None, None, None], None),
            ([None, None, 1], 1),
            ([None, None, ''], ''),
            ([None, None, ['']], ['']),
            ([None, None, ('',)], ('',)),
            ([None, None, {'1': 1}], {'1': 1}),
        )
        for rng, expected in cases:
            rst = rangeset.ValueRange(*rng)
            self.assertEqual(expected, rst.val())
    def test_unicode(self):
        """Unicode boundaries are accepted; reversed or type-incompatible
        boundaries raise ValueError / TypeError."""
        # fix: https://github.com/bsc-s2/pykit/issues/430
        # valid unicode range
        cases = (
            [u'我', None, 0],
            [None, u'我', 0],
            [u'它', u'我', 0],
        )
        for rng in cases:
            r = rangeset.ValueRange(*rng)
        # left > right
        cases = (
            [u'我a', u'我', 0],
        )
        for rng in cases:
            self.assertRaises(ValueError, rangeset.ValueRange, *rng)
        # incompatible
        cases = (
            [u'我', '我', 0],
            [u'我', 0, 0],
            [u'我', 0.0, 0],
            [u'我', (), 0],
            [u'我', [], 0],
            [u'我', {}, 0],
        )
        for l, r, v in cases:
            self.assertRaises(TypeError, rangeset.ValueRange, l, r, v)
            self.assertRaises(TypeError, rangeset.ValueRange, r, l, v)
class TestRangeSet(unittest.TestCase):
def test_init(self):
a = rangeset.RangeSet()
self.assertIsInstance(a, list)
self.assertEqual(0, len(a))
a = rangeset.RangeSet([])
self.assertEqual(0, len(a))
def test_invalid_element_type(self):
cases = (
int, long, float, str, tuple, list
)
dd()
for typ in cases:
dd('test valid type: ', typ)
rangeset.Range(typ(), typ())
rangeset.RangeSet([[typ(), typ()]])
cases = (
lambda x: 1,
True,
)
dd()
for val in cases:
dd('test invalid type: ', typ)
self.assertRaises(TypeError, rangeset.Range, [val, val])
self.assertRaises(TypeError, rangeset.RangeSet, [[val, val]])
# incompatible type
self.assertRaises(TypeError, rangeset.Range, 1, 'a')
def test_range_left_le_right(self):
self.assertRaises(ValueError, rangeset.Range, 1, 0)
def test_int_compare(self):
a = rangeset.RangeSet([])
self.assertEqual([], a)
def test_range_incorrect_order(self):
cases = (
[[None, None], [1, 2]],
[[0, None], [1, 2]],
[[1, 2], [None, 5]],
[[1, 2], [2, 3]],
[[1, 4], [3, 5]],
[[3, 4], [1, 2]],
)
for rs in cases:
dd('case:', rs)
try:
rangeset.RangeSet(rs)
except Exception as e:
dd(repr(e))
self.assertRaises(ValueError, rangeset.RangeSet, rs)
def test_int_add_error(self):
cases = (
([], None, TypeError),
([], True, TypeError),
([], {}, ValueError),
([], 1, TypeError),
([], 1.1, TypeError),
([], [1, 2, 3], TypeError),
([], lambda x: True, TypeError),
)
dd()
for init, ins, err in cases:
dd('case: ', init, ins, err)
a = rangeset.RangeSet(init)
self.assertRaises(err, a.add, ins)
def test_int_add(self):
cases = (
# add into empty range set.
([], [1, 1], [[1, 1]]),
([], [1, 2], [[1, 2]]),
([], [1, 3], [[1, 3]]),
# collapse two range if necesary.
([[10, 20], [30, 40]], [1, 2], [[1, 2], [10, 20], [30, 40]]),
([[10, 20], [30, 40]], [1, 10], [[1, 20], [30, 40]]),
([[10, 20], [30, 40]], [1, 12], [[1, 20], [30, 40]]),
([[10, 20], [30, 40]], [10, 15], [[10, 20], [30, 40]]),
([[10, 20], [30, 40]], [11, 15], [[10, 20], [30, 40]]),
([[10, 20], [30, 40]], [1, 22], [[1, 22], [30, 40]]),
([[10, 20], [30, 40]], [15, 25], [[10, 25], [30, 40]]),
([[10, 20], [30, 40]], [20, 25], [[10, 25], [30, 40]]),
([[10, 20], [30, 40]], [22, 25], [[10, 20], [22, 25], [30, 40]]),
([[10, 20], [30, 40]], [22, 30], [[10, 20], [22, 40]]),
([[10, 20], [30, 40]], [22, 32], [[10, 20], [22, 40]]),
([[10, 20], [30, 40]], [30, 32], [[10, 20], [30, 40]]),
([[10, 20], [30, 40]], [30, 42], [[10, 20], [30, 42]]),
([[10, 20], [30, 40]], [32, 42], [[10, 20], [30, 42]]),
([[10, 20], [30, 40]], [40, 50], [[10, 20], [30, 50]]),
([[10, 20], [30, 40]], [42, 50], [[10, 20], [30, 40], [42, 50]]),
# overlapping with more than one range
([[10, 20], [30, 40]], [20, 30], [[10, 40]]),
([[10, 20], [30, 40]], [19, 30], [[10, 40]]),
([[10, 20], [30, 40]], [20, 31], [[10, 40]]),
([[10, 20], [30, 40]], [0, 35], [[0, 40]]),
([[10, 20], [30, 40]], [15, 50], [[10, 50]]),
([[10, 20], [30, 40]], [15, 50], [[10, 50]]),
([[10, 20], [30, 40]], [15, None], [[10, None]]),
([[10, 20], [30, 40]], [None, 15], [[None, 20], [30, 40]]),
([[10, 20], [30, 40]], [None, 35], [[None, 40]]),
([[10, 20], [30, 40]], [None, None], [[None, None]]),
)
dd()
for init, ins, expected in cases:
dd('cases: ', init, ins, expected)
a = rangeset.RangeSet(init)
a.add(ins)
self.assertEqual(expected, a)
def test_rangeset_has(self):
rs = rangeset.RangeSet([[None, 1], [10, 20], [30, 40], [50, None]])
cases = (
(-1, True),
(0, True),
(1, False),
(5, False),
(9, False),
(10, True),
(15, True),
(20, False),
(21, False),
(29, False),
(30, True),
(31, True),
(40, False),
(49, False),
(50, True),
(51, True),
)
for val, expected in cases:
dd('case:', val, expected)
rst = rs.has(val)
dd('rst:', rst)
self.assertEqual(expected, rst)
def test_union(self):
cases = (
([[None, 10], [20, 30], [40, None]], [[None, None]], [[None, None]]),
([[None, 10], [20, 30], [40, None]], [[None, 1]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[None, 10]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[None, 11]], [[None, 11], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[9, 11]], [[None, 11], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[10, 11]], [[None, 11], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[11, 12]], [[None, 10], [11, 12], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[19, 20]], [[None, 10], [19, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[19, 21]], [[None, 10], [19, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[20, 21]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[24, 25]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[29, 30]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[29, 31]], [[None, 10], [20, 31], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[30, 31]], [[None, 10], [20, 31], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[31, 32]], [[None, 10], [20, 30], [31, 32], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[39, 40]], [[None, 10], [20, 30], [39, None]]),
([[None, 10], [20, 30], [40, None]], [[39, 41]], [[None, 10], [20, 30], [39, None]]),
([[None, 10], [20, 30], [40, None]], [[40, 41]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[41, 42]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[41, None]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[8, 25], [35, 40]], [[None, 30], [35, None]]),
)
dd()
for a, b, expected in cases:
dd('case:', a, b, expected)
a = rangeset.RangeSet(a)
b = rangeset.RangeSet(b)
rst = rangeset.union(a, b)
dd('rst:', rst)
self.assertEqual(expected, rst)
def test_substract(self):
cases = (
([[None, 10], [20, 30], [40, None]], [[None, None]], []),
([[None, 10], [20, 30], [40, None]], [[None, 1]], [[1, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[None, 10]], [[20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[None, 11]], [[20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[9, 11]], [[None, 9], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[10, 11]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[11, 12]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[19, 20]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[19, 21]], [[None, 10], [21, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[20, 21]], [[None, 10], [21, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[24, 25]], [[None, 10], [20, 24], [25, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[29, 30]], [[None, 10], [20, 29], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[29, 31]], [[None, 10], [20, 29], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[30, 31]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[31, 32]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[39, 40]], [[None, 10], [20, 30], [40, None]]),
([[None, 10], [20, 30], [40, None]], [[39, 41]], [[None, 10], [20, 30], [41, None]]),
([[None, 10], [20, 30], [40, None]], [[40, 41]], [[None, 10], [20, 30], [41, None]]),
([[None, 10], [20, 30], [40, None]], [[41, 42]], [[None, 10], [20, 30], [40, 41], [42, None]]),
([[None, 10], [20, 30], [40, None]], [[41, None]], [[None, 10], [20, 30], [40, 41], ]),
([[20, 30]], [[20, 24], [25, 30]], [[24, 25]]),
([[None, 10], [20, 30], [40, None]], [[20, 24], [25, 30]], [[None, 10], [24, 25], [40, None], ]),
([[None, 10], [20, 30], [40, None]], [[1, 2], [8, 25], [35, 45]], [[None, 1], [2, 8], [25, 30], [45, None]]),
)
dd()
for a, b, expected in cases:
dd('case:', a, b, expected)
a = rangeset.RangeSet(a)
b = rangeset.RangeSet(b)
rst = rangeset.substract(a, b)
dd('rst:', rst)
self.assertEqual(expected, rst)
    def test_intersect(self):
        """rangeset.intersect() of two RangeSets.

        Each case is (a, b, expected): intersect(a, b) must equal
        ``expected``.  ``None`` denotes an unbounded (open) endpoint.
        """
        cases = (
            ([[None, 10], [20, 30], [40, None]], [[None, None]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[None, 1]], [[None, 1]]),
            ([[None, 10], [20, 30], [40, None]], [[None, 10]], [[None, 10]]),
            ([[None, 10], [20, 30], [40, None]], [[None, 11]], [[None, 10]]),
            ([[None, 10], [20, 30], [40, None]], [[9, 11]], [[9, 10]]),
            ([[None, 10], [20, 30], [40, None]], [[10, 11]], []),
            ([[None, 10], [20, 30], [40, None]], [[11, 12]], []),
            ([[None, 10], [20, 30], [40, None]], [[19, 20]], []),
            ([[None, 10], [20, 30], [40, None]], [[19, 21]], [[20, 21]]),
            ([[None, 10], [20, 30], [40, None]], [[20, 21]], [[20, 21]]),
            ([[None, 10], [20, 30], [40, None]], [[24, 25]], [[24, 25]]),
            ([[None, 10], [20, 30], [40, None]], [[29, 30]], [[29, 30]]),
            ([[None, 10], [20, 30], [40, None]], [[29, 31]], [[29, 30]]),
            ([[None, 10], [20, 30], [40, None]], [[30, 31]], []),
            ([[None, 10], [20, 30], [40, None]], [[31, 32]], []),
            ([[None, 10], [20, 30], [40, None]], [[39, 40]], []),
            ([[None, 10], [20, 30], [40, None]], [[39, 41]], [[40, 41]]),
            ([[None, 10], [20, 30], [40, None]], [[40, 41]], [[40, 41]]),
            ([[None, 10], [20, 30], [40, None]], [[41, 42]], [[41, 42]]),
            ([[None, 10], [20, 30], [40, None]], [[41, None]], [[41, None]]),
            ([[None, 10], [20, 30], [40, None]], [[1, 2], [8, 25], [35, 45]], [[1, 2], [8, 10], [20, 25], [40, 45]]),
        )
        dd()
        for a, b, expected in cases:
            dd('case:', a, b, expected)
            a = rangeset.RangeSet(a)
            b = rangeset.RangeSet(b)
            rst = rangeset.intersect(a, b)
            dd('rst:', rst)
            self.assertEqual(expected, rst)
def test_length(self):
rst = rangeset.RangeSet([[1, 2], [5, 8]]).length()
self.assertEqual(4, rst)
rst = rangeset.RangeSet([['a', 'b'], ['d', 'd\0']]).length()
self.assertEqual(1.0/257.0 + 1.0/257.0/257.0, rst)
class TestIntIncRangeSet(unittest.TestCase):
    """RangeSet behaviour with IntIncRange: integer, inclusive-end ranges."""

    def test_int_inc_substract(self):
        """substract() on inclusive integer ranges.

        Each case is (a, b, expected): substract(a, b) == expected.
        ``None`` denotes an unbounded endpoint.
        """
        cases = (
            ([[None, 10], [20, 30], [40, None]], [[None, None]], []),
            ([[None, 10], [20, 30], [40, None]], [[None, 1]], [[2, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[None, 10]], [[20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[None, 11]], [[20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[9, 11]], [[None, 8], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[10, 11]], [[None, 9], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[11, 12]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[19, 20]], [[None, 10], [21, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[19, 21]], [[None, 10], [22, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[20, 21]], [[None, 10], [22, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[24, 25]], [[None, 10], [20, 23], [26, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[29, 30]], [[None, 10], [20, 28], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[29, 31]], [[None, 10], [20, 28], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[30, 31]], [[None, 10], [20, 29], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[31, 32]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[39, 40]], [[None, 10], [20, 30], [41, None]]),
            ([[None, 10], [20, 30], [40, None]], [[39, 41]], [[None, 10], [20, 30], [42, None]]),
            ([[None, 10], [20, 30], [40, None]], [[40, 41]], [[None, 10], [20, 30], [42, None]]),
            ([[None, 10], [20, 30], [40, None]], [[41, 42]], [[None, 10], [20, 30], [40, 40], [43, None]]),
            ([[None, 10], [20, 30], [40, None]], [[41, None]], [[None, 10], [20, 30], [40, 40], ]),
            ([[20, 30]], [[20, 23], [25, 30]], [[24, 24]]),
            ([[20, 30]], [[20, 22], [27, 30]], [[23, 26]]),
            ([[None, 10], [20, 30], [40, None]], [[20, 23], [26, 30]], [[None, 10], [24, 25], [40, None], ]),
            ([[None, 10], [20, 30], [40, None]], [[1, 2], [8, 25], [35, 45]], [[None, 0], [3, 7], [26, 30], [46, None]]),
        )
        dd()
        for a, b, expected in cases:
            dd('case:', a, b, expected)
            a = rangeset.RangeSet(a, range_clz=rangeset.IntIncRange)
            b = rangeset.RangeSet(b, range_clz=rangeset.IntIncRange)
            rst = rangeset.substract(a, b)
            dd('rst:', rst)
            self.assertEqual(expected, rst)

    def test_int_inc_union(self):
        """union() on inclusive integer ranges merges adjacent integers."""
        cases = (
            ([[None, 10], [20, 30], [40, None]], [[None, None]], [[None, None]]),
            ([[None, 10], [20, 30], [40, None]], [[None, 1]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[None, 10]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[None, 11]], [[None, 11], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[9, 11]], [[None, 11], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[10, 11]], [[None, 11], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[11, 12]], [[None, 12], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[12, 13]], [[None, 10], [12, 13], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[18, 19]], [[None, 10], [18, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[19, 21]], [[None, 10], [19, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[20, 21]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[24, 25]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[29, 30]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[29, 31]], [[None, 10], [20, 31], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[30, 31]], [[None, 10], [20, 31], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[31, 32]], [[None, 10], [20, 32], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[32, 33]], [[None, 10], [20, 30], [32, 33], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[39, 39]], [[None, 10], [20, 30], [39, None]]),
            ([[None, 10], [20, 30], [40, None]], [[39, 40]], [[None, 10], [20, 30], [39, None]]),
            ([[None, 10], [20, 30], [40, None]], [[39, 41]], [[None, 10], [20, 30], [39, None]]),
            ([[None, 10], [20, 30], [40, None]], [[40, 41]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[41, 42]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[41, None]], [[None, 10], [20, 30], [40, None]]),
            ([[None, 10], [20, 30], [40, None]], [[8, 25], [35, 40]], [[None, 30], [35, None]]),
        )
        dd()
        for a, b, expected in cases:
            dd('case:', a, b, expected)
            a = rangeset.RangeSet(a, range_clz=rangeset.IntIncRange)
            b = rangeset.RangeSet(b, range_clz=rangeset.IntIncRange)
            rst = rangeset.union(a, b)
            dd('rst:', rst)
            self.assertEqual(expected, rst)

    def test_int_inc_length(self):
        """Inclusive length: [1, 2] and [5, 8] cover 2 + 4 = 6 integers."""
        rst = rangeset.IntIncRangeSet([[1, 2], [5, 8]]).length()
        self.assertEqual(6, rst)

    def test_inherit_range_clz(self):
        """Results of set operations keep the operands' range class."""
        a = rangeset.IntIncRangeSet([[1, 2]])
        b = rangeset.IntIncRangeSet([[2, 3], [5, 6]])
        cases = (
            rangeset.union,
            rangeset.substract,
            rangeset.intersect,
        )
        dd()
        for func in cases:
            dd('func:', func)
            # Check both argument orders.
            rst = func(a, b)
            self.assertIs(a[0].__class__, rst[0].__class__)
            rst = func(b, a)
            self.assertIs(a[0].__class__, rst[0].__class__)
class TestRangeDict(unittest.TestCase):
    """RangeDict: a RangeSet whose ranges carry a mapped value."""

    def test_init(self):
        """has() is True inside any range, False in gaps and past ends."""
        rd = rangeset.RangeDict([(1, 2, '12'), [3, 4, '34']])
        cases = (
            1, 1.5, 3, 3.5,
        )
        for present in cases:
            self.assertTrue(rd.has(present))
        cases = (
            0, 2, 2.5, 4, 4.5
        )
        for absent in cases:
            self.assertFalse(rd.has(absent))

    def test_adjacent_elts(self):
        """A boundary point belongs to the right-hand (later) range."""
        rd = rangeset.RangeDict([(1, 2, '12'), [2, 3, '23']])
        self.assertEqual('12', rd.get(1.5))
        self.assertEqual('23', rd.get(2))

    def test_get(self):
        """get() returns the mapped value, or raises KeyError outside."""
        rd = rangeset.RangeDict([(1, 2, '12'), [3, 4, '34']])
        cases = (
            (1, '12'),
            (1.5, '12'),
            (3, '34'),
            (3.5, '34'),
        )
        for present, val in cases:
            self.assertEqual(val, rd.get(present))
        cases = (
            0, 2, 2.5, 4, 4.5
        )
        for absent in cases:
            self.assertRaises(KeyError, rd.get, absent)

    def test_get_min(self):
        """get_min() returns (index, range, value) of the smallest value."""
        cases = (
            (((1, 2, '12'), ), (0, [1, 2, '12'], '12')),
            (((1, 2, '34'), (3, 4, '12')), (1, [3, 4, '12'], '12')),
            (((1, 2, '34'), (3, 4, '12'), (5, 6, '56')), (1, [3, 4, '12'], '12')),
            (((1, 2, '12'), (3, 4, '34'), (5, 6, '12')), (0, [1, 2, '12'], '12')),
        )
        for src_rngs, expected in cases:
            dd("expected index: ", expected[0])
            dd("expected range: ", rangeset.ValueRange(*expected[1]))
            dd("expected value: ", expected[2])
            rd = rangeset.RangeDict(src_rngs)
            idx, rng, val = rd.get_min()
            dd("got index: ", idx)
            dd("got range: ", rng)
            dd("got value: ", val)
            self.assertEqual(idx, expected[0])
            self.assertEqual(rng, rangeset.ValueRange(*expected[1]))
            self.assertEqual(val, expected[2])
        # An empty RangeDict has no minimum.
        rd = rangeset.RangeDict()
        self.assertRaises(ValueError, rd.get_min)

    def test_get_min_is_lt(self):
        """get_min() honours a custom comparison callable."""
        cases = (
            (((1, 2, '12'), (3, 4, '34'), (5, 6, '12')), lambda a, b: a < b, (0, [1, 2, '12'], '12')),
            (((1, 2, '12'), (3, 4, '34'), (5, 6, '12')), lambda a, b: a > b, (1, [3, 4, '34'], '34')),
        )
        for src_rngs, is_lt, expected in cases:
            dd("expected index: ", expected[0])
            dd("expected range: ", rangeset.ValueRange(*expected[1]))
            dd("expected value: ", expected[2])
            rd = rangeset.RangeDict(src_rngs)
            idx, rng, val = rd.get_min(is_lt)
            dd("got index: ", idx)
            dd("got range: ", rng)
            dd("got value: ", val)
            self.assertEqual(idx, expected[0])
            self.assertEqual(rng, rangeset.ValueRange(*expected[1]))
            self.assertEqual(val, expected[2])

    def test_add(self):
        """add() overwrites any overlapped parts of existing ranges.

        Each case is (added, expected): the new (range, value) is added to
        a RangeDict of [1, 5]->'15' and [7, 9]->'79'; ranges with equal
        value merge with their neighbours.
        """
        cases = (
            ([None, 0, 'xx'], [[None, 0, 'xx'], [1, 5, '15'], [7, 9, '79']]),
            ([None, 1, 'xx'], [[None, 1, 'xx'], [1, 5, '15'], [7, 9, '79']]),
            ([None, 2, 'xx'], [[None, 2, 'xx'], [2, 5, '15'], [7, 9, '79']]),
            ([0, 2, 'xx'], [[0, 2, 'xx'], [2, 5, '15'], [7, 9, '79']]),
            ([1, 2, 'xx'], [[1, 2, 'xx'], [2, 5, '15'], [7, 9, '79']]),
            ([1, 5, 'xx'], [[1, 5, 'xx'], [7, 9, '79']]),
            ([2, 3, 'xx'], [[1, 2, '15'], [2, 3, 'xx'], [3, 5, '15'], [7, 9, '79']]),
            ([2, 4, 'xx'], [[1, 2, '15'], [2, 4, 'xx'], [4, 5, '15'], [7, 9, '79']]),
            ([2, 5, 'xx'], [[1, 2, '15'], [2, 5, 'xx'], [7, 9, '79']]),
            ([2, 6, 'xx'], [[1, 2, '15'], [2, 6, 'xx'], [7, 9, '79']]),
            ([2, 7, 'xx'], [[1, 2, '15'], [2, 7, 'xx'], [7, 9, '79']]),
            ([2, 8, 'xx'], [[1, 2, '15'], [2, 8, 'xx'], [8, 9, '79']]),
            ([5, 7, 'xx'], [[1, 5, '15'], [5, 7, 'xx'], [7, 9, '79']]),
            ([5, 8, 'xx'], [[1, 5, '15'], [5, 8, 'xx'], [8, 9, '79']]),
            ([6, 8, 'xx'], [[1, 5, '15'], [6, 8, 'xx'], [8, 9, '79']]),
            ([6, 9, 'xx'], [[1, 5, '15'], [6, 9, 'xx'], ]),
            ([6, 10, 'xx'], [[1, 5, '15'], [6, 10, 'xx'], ]),
            ([7, 8, 'xx'], [[1, 5, '15'], [7, 8, 'xx'], [8, 9, '79']]),
            ([7, 9, 'xx'], [[1, 5, '15'], [7, 9, 'xx']]),
            ([7, 10, 'xx'], [[1, 5, '15'], [7, 10, 'xx']]),
            ([8, 10, 'xx'], [[1, 5, '15'], [7, 8, '79'], [8, 10, 'xx']]),
            ([9, 10, 'xx'], [[1, 5, '15'], [7, 9, '79'], [9, 10, 'xx']]),
            ([10, 11, 'xx'], [[1, 5, '15'], [7, 9, '79'], [10, 11, 'xx']]),
            ([10, None, 'xx'], [[1, 5, '15'], [7, 9, '79'], [10, None, 'xx']]),
            ([None, None, 'xx'], [[None, None, 'xx']]),
            ([2, None, 'xx'], [[1, 2, '15'], [2, None, 'xx']]),
            ([0, 3, '15'], [[0, 5, '15'], [7, 9, '79']]),
            ([2, 6, '15'], [[1, 6, '15'], [7, 9, '79']]),
            ([2, 8, '15'], [[1, 8, '15'], [8, 9, '79']]),
        )
        for a, expected in cases:
            rd = rangeset.RangeDict([(1, 5, '15'), [7, 9, '79']])
            dd('init:', rd)
            dd('add:', a)
            dd('expected:', expected)
            rd.add(a[:2], a[2])
            self.assertEqual(expected, rd)

    def test_normalize(self):
        """normalize() merges adjacent ranges that map to equal values."""
        cases = (
            ([(None, 2, '12'), [2, 3, '12']],
             [[None, 3, '12']]),
            ([(None, 2, '12'), [2, None, '12']],
             [[None, None, '12']]),
            ([(1, 2, '12'), [2, 3, '12']],
             [[1, 3, '12']]),
            ([(1, 2, '12'), [2, 3, '12'], [3, 4, '12']],
             [[1, 4, '12']]),
            ([(1, 2, '12'), [2, 3, 'foo']],
             [[1, 2, '12'], [2, 3, 'foo']]),
        )
        for ori, expected in cases:
            rd = rangeset.RangeDict(ori)
            rd.normalize()
            self.assertEqual(expected, rd)

    def test_substract(self):
        """substract() from a RangeDict keeps values on what remains.

        Each case is (a, b, expected); it is run twice: subtracting a
        RangeSet from the RangeDict, then subtracting a RangeDict.
        """
        cases = (
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[None, None]], []),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[None, 1]], [[1, 10, 'a'], [20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[None, 10]], [[20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[None, 11]], [[20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[9, 11]], [[None, 9, 'a'], [20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[10, 11]], [[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[11, 12]], [[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[19, 20]], [[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[19, 21]], [[None, 10, 'a'], [21, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[20, 21]], [[None, 10, 'a'], [21, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[24, 25]], [[None, 10, 'a'], [20, 24, 'b'], [25, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[29, 30]], [[None, 10, 'a'], [20, 29, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[29, 31]], [[None, 10, 'a'], [20, 29, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[30, 31]], [[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[31, 32]], [[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[39, 40]], [[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[39, 41]], [[None, 10, 'a'], [20, 30, 'b'], [41, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[40, 41]], [[None, 10, 'a'], [20, 30, 'b'], [41, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[41, 42]], [[None, 10, 'a'], [20, 30, 'b'], [40, 41, 'c'], [42, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[41, None]], [[None, 10, 'a'], [20, 30, 'b'], [40, 41, 'c'], ]),
            ([[20, 30, 'a']], [[20, 24], [25, 30]], [[24, 25, 'a']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[20, 24], [25, 30]], [[None, 10, 'a'], [24, 25, 'b'], [40, None, 'c'], ]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [[1, 2], [8, 25], [35, 45]], [[None, 1, 'a'], [2, 8, 'a'], [25, 30, 'b'], [45, None, 'c']]),
        )
        dd()
        for a, b, expected in cases:
            dd('case:', a, b, expected)
            # RangeDict - RangeSet
            a = rangeset.RangeDict(a)
            b = rangeset.RangeSet(b)
            rst = rangeset.substract(a, b)
            dd('rst:', rst)
            self.assertEqual(expected, rst)
            # RangeDict - RangeDict
            b = rangeset.RangeDict(b)
            rst = rangeset.substract(a, b)
            dd('rst:', rst)
            self.assertEqual(expected, rst)

    def test_intersect(self):
        # intersect is implemented with substract, thus we simplified the test.
        a = rangeset.RangeDict([[1, 3, 'a'], [5, 10, 'b']])
        b = rangeset.RangeDict([[2, 8, 'x']])
        self.assertEqual([[2, 3, 'a'], [5, 8, 'b']], rangeset.intersect(a, b))

    def test_find_overlapped(self):
        """find_overlapped() returns the ranges touching a query range.

        The same expectation is checked against a raw pair, a Range, a
        ValueRange, and then again on a plain RangeSet.
        """
        cases = (
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [None, None], [[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [None, 1], [[None, 10, 'a']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [None, 10], [[None, 10, 'a']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [None, 11], [[None, 10, 'a']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [9, 11], [[None, 10, 'a']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [9, 21], [[None, 10, 'a'], [20, 30, 'b']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [9, 40], [[None, 10, 'a'], [20, 30, 'b']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [9, 41], [[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [10, 11], []),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [11, 12], []),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [19, 20], []),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [19, 21], [[20, 30, 'b']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [20, 21], [[20, 30, 'b']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [24, 25], [[20, 30, 'b']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [29, 30], [[20, 30, 'b']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [29, 31], [[20, 30, 'b']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [29, 41], [[20, 30, 'b'], [40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [30, 31], []),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [31, 32], []),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [39, 40], []),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [39, 41], [[40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [40, 41], [[40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [41, 42], [[40, None, 'c']]),
            ([[None, 10, 'a'], [20, 30, 'b'], [40, None, 'c']], [41, None], [[40, None, 'c']]),
        )
        dd()
        for a, b, expected in cases:
            dd('case:', a, b, expected)
            a = rangeset.RangeDict(a)
            self.assertEqual(expected, a.find_overlapped(b))
            self.assertEqual(expected, a.find_overlapped(rangeset.Range(*b)))
            self.assertEqual(expected, a.find_overlapped(rangeset.ValueRange(*(b+['bar']))))
            # The same query on a value-less RangeSet.
            a = rangeset.RangeSet([x[:2] for x in a])
            expected = [x[:2] for x in expected]
            self.assertEqual(expected, a.find_overlapped(b))
            self.assertEqual(expected, a.find_overlapped(rangeset.Range(*b)))
            self.assertEqual(expected, a.find_overlapped(rangeset.ValueRange(*(b+['bar']))))
class TestRangeDictMultiDimension(unittest.TestCase):
    """
    A sample of 2d mapped value: time(t) and a string range:
    This setting split the entire plain into 4 areas.

    range
    ^
    |
    d +----+----+
      |    | cd |
    c + bd +----+
      |    |    |
    b +----+    |
      | ab | ac |
    a +----+----+
      |
   '' +----+----+--------> t
      0    1    2
    """

    # Two time ranges, each mapping to a nested string-keyed RangeDict.
    inp = [
        [0, 1, [['a', 'b', 'ab'],
                ['b', 'd', 'bd'],
                ]],
        [1, 2, [['a', 'c', 'ac'],
                ['c', 'd', 'cd'],
                ]],
    ]

    def test_get(self):
        """get(t) returns the nested dict; get(t, s) descends directly."""
        r = rangeset.RangeDict(self.inp, dimension=2)
        cases = (
            (0.5, 'a', 'ab'),
            (0.5, 'c', 'bd'),
            (1.5, 'a', 'ac'),
            (1.5, 'c', 'cd'),
        )
        for tm, string, expected in cases:
            dd(tm, string, expected)
            rst = r.get(tm).get(string)
            dd('rst:', rst)
            self.assertEqual(expected, rst)
            # in one get
            rst = r.get(tm, string)
            dd('rst:', rst)
            self.assertEqual(expected, rst)
        # too many args
        self.assertRaises(TypeError, r.get, 1, 'a', 1)

    def test_add(self):
        """add() accepts a nested range list for the second dimension."""
        r = rangeset.RangeDict(self.inp, dimension=2)
        r.add([2, None], [['a', 'd', 'ad']])
        rst = r.get(2.5, 'b')
        self.assertEqual('ad', rst)
        self.assertRaises(KeyError, r.get, 2.5, 'e')

    def test_substract(self):
        """substract() removes whole time slices from a 2-d RangeDict."""
        r = rangeset.RangeDict(self.inp, dimension=2)
        r = rangeset.substract(r, rangeset.RangeDict([[0.5, 1.5, None]]))
        dd(r)
        self.assertRaises(KeyError, r.get, 2)
        self.assertRaises(KeyError, r.get, 2, 'e')
        self.assertEqual('bd', r.get(0, 'b'))
        self.assertEqual('ac', r.get(1.6, 'b'))
        r = rangeset.substract(r, rangeset.RangeDict([[0, 1.5, None]]))
        dd(r)
        self.assertRaises(KeyError, r.get, 0)
| 42.789522 | 156 | 0.342219 |
ace9b981b6ec905aa592d9613a42e9d2c6cda0f4 | 1,410 | py | Python | spanner/tests/_fixtures.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | 1 | 2021-01-04T11:40:17.000Z | 2021-01-04T11:40:17.000Z | spanner/tests/_fixtures.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | null | null | null | spanner/tests/_fixtures.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test fixtures."""
# Test-fixture DDL for a Cloud Spanner database: four tables exercising
# interleaving, scalar column types and an array column.  Statements are
# separated by ';'.
DDL = """\
CREATE TABLE contacts (
contact_id INT64,
first_name STRING(1024),
last_name STRING(1024),
email STRING(1024) )
PRIMARY KEY (contact_id);
CREATE TABLE contact_phones (
contact_id INT64,
phone_type STRING(1024),
phone_number STRING(1024) )
PRIMARY KEY (contact_id, phone_type),
INTERLEAVE IN PARENT contacts ON DELETE CASCADE;
CREATE TABLE all_types (
list_goes_on ARRAY<INT64>,
are_you_sure BOOL,
raw_data BYTES(16),
hwhen DATE,
approx_value FLOAT64,
eye_d INT64,
description STRING(16),
exactly_hwhen TIMESTAMP)
PRIMARY KEY (eye_d);
CREATE TABLE counters (
name STRING(1024),
value INT64 )
PRIMARY KEY (name);
"""

# Individual DDL statements, whitespace-stripped, empty entries dropped.
DDL_STATEMENTS = [stmt.strip() for stmt in DDL.split(';') if stmt.strip()]
| 29.375 | 74 | 0.713475 |
ace9b9a29468dfb0bd4af1a0f3ada1b0fa5abb02 | 146 | py | Python | ejercicio-edad-1.py | josulaguna/Python3 | fbdbe609c7ccf06092bbe1073f209b51ce7308a0 | [
"MIT"
] | null | null | null | ejercicio-edad-1.py | josulaguna/Python3 | fbdbe609c7ccf06092bbe1073f209b51ce7308a0 | [
"MIT"
] | null | null | null | ejercicio-edad-1.py | josulaguna/Python3 | fbdbe609c7ccf06092bbe1073f209b51ce7308a0 | [
"MIT"
] | null | null | null | #Josué Laguna Alonso
#07/02/2018
edad = input ("Introduzca su edad: ")
if edad >= 15 and edad <= 17 :
print "puedes entrar en la sesion de tarde"
| 24.333333 | 43 | 0.691781 |
ace9ba220383f14b9f4e6ee3576e07e4312050fc | 726 | py | Python | Sorting Algorithims/stooge_sort.py | Wish1991/Python | 11b407ea1c47f63cb07dbf8cb90df93d2190821f | [
"MIT"
] | 1 | 2022-03-23T23:04:02.000Z | 2022-03-23T23:04:02.000Z | Sorting Algorithims/stooge_sort.py | Wish1991/Python | 11b407ea1c47f63cb07dbf8cb90df93d2190821f | [
"MIT"
] | null | null | null | Sorting Algorithims/stooge_sort.py | Wish1991/Python | 11b407ea1c47f63cb07dbf8cb90df93d2190821f | [
"MIT"
] | 1 | 2022-03-23T23:04:40.000Z | 2022-03-23T23:04:40.000Z | # See what stooge sort dooes
# https://www.youtube.com/watch?v=vIDkfrSdID8
# See what stooge sort does
# https://www.youtube.com/watch?v=vIDkfrSdID8
def stooge_sort_(arr, l, h):
    """Sort arr[l..h] (inclusive) in ascending order, in place.

    Args:
        arr: list to sort; mutated in place.
        l: index of the first element of the slice to sort.
        h: index of the last element of the slice to sort.
    Returns:
        None.
    """
    if l >= h:
        return
    # Put the smaller of the two boundary elements first.
    if arr[l] > arr[h]:
        arr[l], arr[h] = arr[h], arr[l]
    # With more than 2 elements, recursively sort overlapping two-thirds:
    # first 2/3, last 2/3, then first 2/3 again.
    if h - l + 1 > 2:
        t = (h - l + 1) // 3
        stooge_sort_(arr, l, h - t)
        stooge_sort_(arr, l + t, h)
        stooge_sort_(arr, l, h - t)
# Demo: sort a small list in place and print the result.
arr = [2, 4, 5, 3, 1]
n = len(arr)
stooge_sort_(arr, 0, n - 1)
print(arr)
| 20.166667 | 54 | 0.533058 |
ace9ba6d65dc4e44d0800598a12622ac828371f5 | 25,867 | py | Python | heat/tests/api/aws/test_api_ec2token.py | ChameleonCloud/heat | 67cf6eebf4ee59a6baeda8a42bd3cf4de88cbfea | [
"Apache-2.0"
] | 1 | 2018-07-04T07:59:26.000Z | 2018-07-04T07:59:26.000Z | heat/tests/api/aws/test_api_ec2token.py | ljzjohnson/heat | 9e463f4af77513980b1fd215d5d2ad3bf7b979f9 | [
"Apache-2.0"
] | null | null | null | heat/tests/api/aws/test_api_ec2token.py | ljzjohnson/heat | 9e463f4af77513980b1fd215d5d2ad3bf7b979f9 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from oslo_config import cfg
from oslo_utils import importutils
import requests
import six
from heat.api.aws import ec2token
from heat.api.aws import exception
from heat.common import wsgi
from heat.tests import common
from heat.tests import utils
class Ec2TokenTest(common.HeatTestCase):
"""Tests the Ec2Token middleware."""
    def setUp(self):
        """Patch requests.post so no real HTTP call is ever made."""
        super(Ec2TokenTest, self).setUp()
        self.patchobject(requests, 'post')
def _dummy_GET_request(self, params=None, environ=None):
# Mangle the params dict into a query string
params = params or {}
environ = environ or {}
qs = "&".join(["=".join([k, str(params[k])]) for k in params])
environ.update({'REQUEST_METHOD': 'GET', 'QUERY_STRING': qs})
req = wsgi.Request(environ)
return req
def test_conf_get_paste(self):
dummy_conf = {'auth_uri': 'http://192.0.2.9/v2.0'}
ec2 = ec2token.EC2Token(app=None, conf=dummy_conf)
self.assertEqual('http://192.0.2.9/v2.0', ec2._conf_get('auth_uri'))
self.assertEqual(
'http://192.0.2.9/v2.0/ec2tokens',
ec2._conf_get_keystone_ec2_uri('http://192.0.2.9/v2.0'))
    def test_conf_get_opts(self):
        """oslo.config [ec2authtoken] wins; clients_keystone is ignored."""
        cfg.CONF.set_default('auth_uri', 'http://192.0.2.9/v2.0/',
                             group='ec2authtoken')
        cfg.CONF.set_default('auth_uri', 'this-should-be-ignored',
                             group='clients_keystone')
        ec2 = ec2token.EC2Token(app=None, conf={})
        self.assertEqual('http://192.0.2.9/v2.0/', ec2._conf_get('auth_uri'))
        self.assertEqual(
            'http://192.0.2.9/v2.0/ec2tokens',
            ec2._conf_get_keystone_ec2_uri('http://192.0.2.9/v2.0/'))
    def test_conf_get_clients_keystone_opts(self):
        """With no [ec2authtoken] auth_uri, fall back to clients_keystone.

        Keystone version discovery (keystoneauth1) is mocked to return the
        v3 endpoint.
        """
        cfg.CONF.set_default('auth_uri', None, group='ec2authtoken')
        cfg.CONF.set_default('auth_uri', 'http://192.0.2.9',
                             group='clients_keystone')
        with mock.patch('keystoneauth1.discover.Discover') as discover:
            class MockDiscover(object):
                # Fake discovery result: always report the v3 endpoint.
                def url_for(self, endpoint):
                    return 'http://192.0.2.9/v3/'
            discover.return_value = MockDiscover()
            ec2 = ec2token.EC2Token(app=None, conf={})
            self.assertEqual(
                'http://192.0.2.9/v3/ec2tokens',
                ec2._conf_get_keystone_ec2_uri('http://192.0.2.9/v3/'))
    def test_conf_get_ssl_default_options(self):
        """Defaults: verify SSL, no client certificate."""
        ec2 = ec2token.EC2Token(app=None, conf={})
        self.assertTrue(ec2.ssl_options['verify'],
                        "SSL verify should be True by default")
        self.assertIsNone(ec2.ssl_options['cert'],
                          "SSL client cert should be None by default")
    def test_conf_ssl_insecure_option(self):
        """insecure=True with no CA file disables SSL verification."""
        ec2 = ec2token.EC2Token(app=None, conf={})
        # NOTE(review): config is set after construction, so ssl_options
        # is presumably evaluated lazily on access -- confirm in ec2token.
        cfg.CONF.set_default('insecure', 'True', group='ec2authtoken')
        cfg.CONF.set_default('ca_file', None, group='ec2authtoken')
        self.assertFalse(ec2.ssl_options['verify'])
    def test_conf_get_ssl_opts(self):
        """ca_file maps to 'verify'; cert_file/key_file map to 'cert'."""
        cfg.CONF.set_default('auth_uri', 'https://192.0.2.9/v2.0/',
                             group='ec2authtoken')
        cfg.CONF.set_default('ca_file', '/home/user/cacert.pem',
                             group='ec2authtoken')
        cfg.CONF.set_default('insecure', 'false', group='ec2authtoken')
        cfg.CONF.set_default('cert_file', '/home/user/mycert',
                             group='ec2authtoken')
        cfg.CONF.set_default('key_file', '/home/user/mykey',
                             group='ec2authtoken')
        ec2 = ec2token.EC2Token(app=None, conf={})
        self.assertEqual('/home/user/cacert.pem', ec2.ssl_options['verify'])
        self.assertEqual(('/home/user/mycert', '/home/user/mykey'),
                         ec2.ssl_options['cert'])
def test_get_signature_param_old(self):
params = {'Signature': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_signature(dummy_req))
def test_get_signature_param_new(self):
params = {'X-Amz-Signature': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_signature(dummy_req))
    def test_get_signature_header_space(self):
        """Signature is parsed from an Authorization header (with spaces)."""
        req_env = {'HTTP_AUTHORIZATION':
                   ('Authorization: foo Credential=foo/bar, '
                    'SignedHeaders=content-type;host;x-amz-date, '
                    'Signature=xyz')}
        dummy_req = self._dummy_GET_request(environ=req_env)
        ec2 = ec2token.EC2Token(app=None, conf={})
        self.assertEqual('xyz', ec2._get_signature(dummy_req))
    def test_get_signature_header_notlast(self):
        """Signature is found even when it is not the last header field."""
        req_env = {'HTTP_AUTHORIZATION':
                   ('Authorization: foo Credential=foo/bar, '
                    'Signature=xyz,'
                    'SignedHeaders=content-type;host;x-amz-date ')}
        dummy_req = self._dummy_GET_request(environ=req_env)
        ec2 = ec2token.EC2Token(app=None, conf={})
        self.assertEqual('xyz', ec2._get_signature(dummy_req))
    def test_get_signature_header_nospace(self):
        """Signature is parsed when fields have no space after commas."""
        req_env = {'HTTP_AUTHORIZATION':
                   ('Authorization: foo Credential=foo/bar,'
                    'SignedHeaders=content-type;host;x-amz-date,'
                    'Signature=xyz')}
        dummy_req = self._dummy_GET_request(environ=req_env)
        ec2 = ec2token.EC2Token(app=None, conf={})
        self.assertEqual('xyz', ec2._get_signature(dummy_req))
def test_get_access_param_old(self):
params = {'AWSAccessKeyId': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_get_access_param_new(self):
params = {'X-Amz-Credential': 'foo/bar'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
    def test_get_access_header_space(self):
        """Access key is parsed from the Authorization Credential field."""
        req_env = {'HTTP_AUTHORIZATION':
                   ('Authorization: foo Credential=foo/bar, '
                    'SignedHeaders=content-type;host;x-amz-date, '
                    'Signature=xyz')}
        dummy_req = self._dummy_GET_request(environ=req_env)
        ec2 = ec2token.EC2Token(app=None, conf={})
        self.assertEqual('foo', ec2._get_access(dummy_req))
    def test_get_access_header_nospace(self):
        """Access key is parsed when fields have no space after commas."""
        req_env = {'HTTP_AUTHORIZATION':
                   ('Authorization: foo Credential=foo/bar,'
                    'SignedHeaders=content-type;host;x-amz-date,'
                    'Signature=xyz')}
        dummy_req = self._dummy_GET_request(environ=req_env)
        ec2 = ec2token.EC2Token(app=None, conf={})
        self.assertEqual('foo', ec2._get_access(dummy_req))
    def test_get_access_header_last(self):
        """Access key is found when Credential is the last header field."""
        req_env = {'HTTP_AUTHORIZATION':
                   ('Authorization: foo '
                    'SignedHeaders=content-type;host;x-amz-date,'
                    'Signature=xyz,Credential=foo/bar')}
        dummy_req = self._dummy_GET_request(environ=req_env)
        ec2 = ec2token.EC2Token(app=None, conf={})
        self.assertEqual('foo', ec2._get_access(dummy_req))
def test_call_x_auth_user(self):
req_env = {'HTTP_X_AUTH_USER': 'foo'}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertEqual('xyz', ec2.__call__(dummy_req))
    def test_call_auth_nosig(self):
        """An Authorization header missing Signature raises an AWS error."""
        req_env = {'HTTP_AUTHORIZATION':
                   ('Authorization: foo Credential=foo/bar, '
                    'SignedHeaders=content-type;host;x-amz-date')}
        dummy_req = self._dummy_GET_request(environ=req_env)
        ec2 = ec2token.EC2Token(app='xyz', conf={})
        self.assertRaises(exception.HeatIncompleteSignatureError,
                          ec2.__call__, dummy_req)
    def test_call_auth_nouser(self):
        """A header with a Signature but no Credential raises an error."""
        req_env = {'HTTP_AUTHORIZATION':
                   ('Authorization: foo '
                    'SignedHeaders=content-type;host;x-amz-date,'
                    'Signature=xyz')}
        dummy_req = self._dummy_GET_request(environ=req_env)
        ec2 = ec2token.EC2Token(app='xyz', conf={})
        self.assertRaises(exception.HeatMissingAuthenticationTokenError,
                          ec2.__call__, dummy_req)
    def test_call_auth_noaccess(self):
        # If there's no accesskey in params or header, but there is a
        # Signature, we expect HeatMissingAuthenticationTokenError
        params = {'Signature': 'foo'}
        dummy_req = self._dummy_GET_request(params)
        ec2 = ec2token.EC2Token(app='xyz', conf={})
        self.assertRaises(exception.HeatMissingAuthenticationTokenError,
                          ec2.__call__, dummy_req)
    def test_call_x_auth_nouser_x_auth_user(self):
        """X-Auth-User bypasses EC2 auth even with a broken AWS header."""
        req_env = {'HTTP_X_AUTH_USER': 'foo',
                   'HTTP_AUTHORIZATION':
                   ('Authorization: foo '
                    'SignedHeaders=content-type;host;x-amz-date,'
                    'Signature=xyz')}
        dummy_req = self._dummy_GET_request(environ=req_env)
        ec2 = ec2token.EC2Token(app='xyz', conf={})
        self.assertEqual('xyz', ec2.__call__(dummy_req))
    def _stub_http_connection(self, headers=None, params=None, response=None,
                              req_url='http://123:5000/v3/ec2tokens',
                              verify=True, cert=None, direct_mock=True):
        """Prepare the mocked requests.post and record expected call args.

        Stores the URL/body/ssl arguments the middleware is expected to
        pass to requests.post on self (verify_*) so tests can assert with
        requests.post.assert_called_once_with().  When direct_mock is
        False, the fake response object is returned to the caller instead
        of being installed as requests.post's return value.
        """
        headers = headers or {}
        params = params or {}

        class DummyHTTPResponse(object):
            # Minimal stand-in for a requests.Response.
            text = response
            headers = {'X-Subject-Token': 123}

            def json(self):
                return json.loads(self.text)

        # SHA-256 hash of an empty request body.
        body_hash = ('e3b0c44298fc1c149afbf4c8996fb9'
                     '2427ae41e4649b934ca495991b7852b855')
        req_creds = {
            "ec2Credentials": {
                "access": "foo",
                "headers": headers,
                "host": "heat:8000",
                "verb": "GET",
                "params": params,
                "signature": "xyz",
                "path": "/v1",
                "body_hash": body_hash
            }
        }
        req_headers = {'Content-Type': 'application/json'}
        self.verify_req_url = req_url
        self.verify_data = utils.JsonRepr(req_creds)
        self.verify_verify = verify
        self.verify_cert = cert
        self.verify_req_headers = req_headers
        if direct_mock:
            requests.post.return_value = DummyHTTPResponse()
        else:
            return DummyHTTPResponse()
def test_call_ok(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
ok_resp = json.dumps({'token': {
'project': {'name': 'tenant', 'id': 'abcd1234'}}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=ok_resp)
self.assertEqual('woot', ec2.__call__(dummy_req))
self.assertEqual('tenant', dummy_req.headers['X-Tenant-Name'])
self.assertEqual('abcd1234', dummy_req.headers['X-Tenant-Id'])
requests.post.assert_called_once_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_call_ok_roles(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
ok_resp = json.dumps({
'token': {
'id': 123,
'project': {'name': 'tenant', 'id': 'abcd1234'},
'roles': [{'name': 'aa'}, {'name': 'bb'}, {'name': 'cc'}]}
})
self._stub_http_connection(headers={'Authorization': auth_str},
response=ok_resp)
self.assertEqual('woot', ec2.__call__(dummy_req))
self.assertEqual('aa,bb,cc', dummy_req.headers['X-Roles'])
requests.post.assert_called_once_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_call_err_tokenid(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0/'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
err_msg = "EC2 access key not found."
err_resp = json.dumps({'error': {'message': err_msg}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=err_resp)
self.assertRaises(exception.HeatInvalidClientTokenIdError,
ec2.__call__, dummy_req)
requests.post.assert_called_once_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_call_err_signature(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
err_msg = "EC2 signature not supplied."
err_resp = json.dumps({'error': {'message': err_msg}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=err_resp)
self.assertRaises(exception.HeatSignatureError,
ec2.__call__, dummy_req)
requests.post.assert_called_once_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_call_err_denied(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
err_resp = json.dumps({})
self._stub_http_connection(headers={'Authorization': auth_str},
response=err_resp)
self.assertRaises(exception.HeatAccessDeniedError,
ec2.__call__, dummy_req)
requests.post.assert_called_once_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_call_ok_v2(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'token': {
'project': {'name': 'tenant', 'id': 'abcd1234'}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.assertEqual('woot', ec2.__call__(dummy_req))
requests.post.assert_called_once_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_call_ok_multicloud(self):
dummy_conf = {
'allowed_auth_uris': [
'http://123:5000/v2.0', 'http://456:5000/v2.0'],
'multi_cloud': True
}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'token': {
'project': {'name': 'tenant', 'id': 'abcd1234'}}})
err_msg = "EC2 access key not found."
err_resp = json.dumps({'error': {'message': err_msg}})
# first request fails
m_p = self._stub_http_connection(
req_url='http://123:5000/v2.0/ec2tokens',
response=err_resp,
params={'AWSAccessKeyId': 'foo'}, direct_mock=False)
# second request passes
m_p2 = self._stub_http_connection(
req_url='http://456:5000/v2.0/ec2tokens',
response=ok_resp,
params={'AWSAccessKeyId': 'foo'}, direct_mock=False)
requests.post.side_effect = [m_p, m_p2]
self.assertEqual('woot', ec2.__call__(dummy_req))
self.assertEqual(2, requests.post.call_count)
requests.post.assert_called_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_call_err_multicloud(self):
dummy_conf = {
'allowed_auth_uris': [
'http://123:5000/v2.0', 'http://456:5000/v2.0'],
'multi_cloud': True
}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
err_resp1 = json.dumps({})
err_msg2 = "EC2 access key not found."
err_resp2 = json.dumps({'error': {'message': err_msg2}})
# first request fails with HeatAccessDeniedError
m_p = self._stub_http_connection(
req_url='http://123:5000/v2.0/ec2tokens',
response=err_resp1,
params={'AWSAccessKeyId': 'foo'}, direct_mock=False)
# second request fails with HeatInvalidClientTokenIdError
m_p2 = self._stub_http_connection(
req_url='http://456:5000/v2.0/ec2tokens',
response=err_resp2,
params={'AWSAccessKeyId': 'foo'}, direct_mock=False)
requests.post.side_effect = [m_p, m_p2]
# raised error matches last failure
self.assertRaises(exception.HeatInvalidClientTokenIdError,
ec2.__call__, dummy_req)
self.assertEqual(2, requests.post.call_count)
requests.post.assert_called_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_call_err_multicloud_none_allowed(self):
dummy_conf = {
'allowed_auth_uris': [],
'multi_cloud': True
}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
self.assertRaises(exception.HeatAccessDeniedError,
ec2.__call__, dummy_req)
def test_call_badconf_no_authuri(self):
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ex = self.assertRaises(exception.HeatInternalFailureError,
ec2.__call__, dummy_req)
self.assertEqual('Service misconfigured', six.text_type(ex))
def test_call_ok_auth_uri_ec2authtoken(self):
dummy_url = 'http://123:5000/v2.0'
cfg.CONF.set_default('auth_uri', dummy_url, group='ec2authtoken')
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'token': {
'project': {'name': 'tenant', 'id': 'abcd1234'}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.assertEqual('woot', ec2.__call__(dummy_req))
requests.post.assert_called_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_call_ok_auth_uri_ec2authtoken_long(self):
# Prove we tolerate a url which already includes the /ec2tokens path
dummy_url = 'http://123:5000/v2.0/ec2tokens'
cfg.CONF.set_default('auth_uri', dummy_url, group='ec2authtoken')
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'token': {
'project': {'name': 'tenant', 'id': 'abcd1234'}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.assertEqual('woot', ec2.__call__(dummy_req))
requests.post.assert_called_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_call_ok_auth_uri_ks_authtoken(self):
# Import auth_token to have keystone_authtoken settings setup.
importutils.import_module('keystonemiddleware.auth_token')
dummy_url = 'http://123:5000/v2.0'
cfg.CONF.set_override('auth_uri', dummy_url,
group='keystone_authtoken')
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'token': {
'project': {'name': 'tenant', 'id': 'abcd1234'}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.assertEqual('woot', ec2.__call__(dummy_req))
requests.post.assert_called_with(
self.verify_req_url, data=self.verify_data,
verify=self.verify_verify,
cert=self.verify_cert, headers=self.verify_req_headers)
def test_filter_factory(self):
ec2_filter = ec2token.EC2Token_filter_factory(global_conf={})
self.assertEqual('xyz', ec2_filter('xyz').application)
def test_filter_factory_none_app(self):
ec2_filter = ec2token.EC2Token_filter_factory(global_conf={})
self.assertIsNone(ec2_filter(None).application)
| 42.897181 | 78 | 0.592531 |
ace9bba283ae8fa2440568f74a965636e77d47dd | 52 | py | Python | TWLight/users/handshakers.py | jajodiaraghav/TWLight | 22359ab0b95ee3653e8ffa0eb698acd7bb8ebf70 | [
"MIT"
] | 1 | 2019-10-24T04:49:52.000Z | 2019-10-24T04:49:52.000Z | TWLight/users/handshakers.py | jajodiaraghav/TWLight | 22359ab0b95ee3653e8ffa0eb698acd7bb8ebf70 | [
"MIT"
] | 1 | 2019-03-29T15:29:45.000Z | 2019-03-29T15:57:20.000Z | TWLight/users/handshakers.py | jajodiaraghav/TWLight | 22359ab0b95ee3653e8ffa0eb698acd7bb8ebf70 | [
"MIT"
] | 1 | 2019-09-26T14:40:27.000Z | 2019-09-26T14:40:27.000Z | from .helpers import wiki_list
handshaker_dict = {} | 17.333333 | 30 | 0.788462 |
ace9bbf392596086095e383347438c6f1ed711e9 | 403 | py | Python | authentication/migrations/0003_user_official_page.py | jatingupta14/cruzz | 9a00f1555cdd5c76c9ef250d7037d72d725de367 | [
"MIT"
] | 7 | 2018-11-09T14:40:54.000Z | 2019-12-20T08:10:17.000Z | authentication/migrations/0003_user_official_page.py | jatingupta14/cruzz | 9a00f1555cdd5c76c9ef250d7037d72d725de367 | [
"MIT"
] | 25 | 2018-11-30T17:38:36.000Z | 2018-12-27T17:21:09.000Z | authentication/migrations/0003_user_official_page.py | jatingupta14/cruzz | 9a00f1555cdd5c76c9ef250d7037d72d725de367 | [
"MIT"
] | 6 | 2018-12-03T14:44:29.000Z | 2018-12-26T11:49:43.000Z | # Generated by Django 2.0.6 on 2018-11-08 13:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0002_auto_20181106_1012'),
]
operations = [
migrations.AddField(
model_name='user',
name='official_page',
field=models.NullBooleanField(default=False),
),
]
| 21.210526 | 57 | 0.615385 |
ace9bc1fd2eae82ec125cbcfc2a6a72eccf2a92d | 9,157 | py | Python | oscar/lib/python2.7/site-packages/django_extensions/management/commands/runscript.py | AMuratTuran/mkn | 557086426773ced10d82c969304bd349414a601e | [
"BSD-3-Clause"
] | 1 | 2019-03-11T17:10:06.000Z | 2019-03-11T17:10:06.000Z | oscar/lib/python2.7/site-packages/django_extensions/management/commands/runscript.py | AMuratTuran/mkn | 557086426773ced10d82c969304bd349414a601e | [
"BSD-3-Clause"
] | 7 | 2020-03-24T16:20:07.000Z | 2021-12-13T19:51:29.000Z | venv/lib/python3.6/site-packages/django_extensions/management/commands/runscript.py | fkallin/Superbook | a5874d3fbe04085cc4e62051c44a178a61dc289a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import importlib
import inspect
import os
import traceback
from argparse import ArgumentTypeError
from django.apps import apps
from django.core.management.base import CommandError
from django_extensions.management.email_notifications import EmailNotificationCommand
from django_extensions.management.utils import signalcommand
class DirPolicyChoices:
NONE = 'none'
EACH = 'each'
ROOT = 'root'
def check_is_directory(value):
if value is None or not os.path.isdir(value):
raise ArgumentTypeError("%s is not a directory!" % value)
return value
class BadCustomDirectoryException(Exception):
def __init__(self, value):
self.message = value + ' If --dir-policy is custom than you must set correct directory in ' \
'--dir option or in settings.RUNSCRIPT_CHDIR'
def __str__(self):
return self.message
class Command(EmailNotificationCommand):
help = 'Runs a script in django context.'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.current_directory = os.getcwd()
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('script', nargs='+')
parser.add_argument(
'--fixtures', action='store_true', dest='infixtures', default=False,
help='Only look in app.fixtures subdir',
)
parser.add_argument(
'--noscripts', action='store_true', dest='noscripts', default=False,
help='Look in app.scripts subdir',
)
parser.add_argument(
'-s', '--silent', action='store_true', dest='silent', default=False,
help='Run silently, do not show errors and tracebacks',
)
parser.add_argument(
'--no-traceback', action='store_true', dest='no_traceback', default=False,
help='Do not show tracebacks',
)
parser.add_argument(
'--script-args', nargs='*', type=str,
help='Space-separated argument list to be passed to the scripts. Note that the '
'same arguments will be passed to all named scripts.',
)
parser.add_argument(
'--dir-policy', type=str,
choices=[DirPolicyChoices.NONE, DirPolicyChoices.EACH, DirPolicyChoices.ROOT],
help='Policy of selecting scripts execution directory: '
'none - start all scripts in current directory '
'each - start all scripts in their directories '
'root - start all scripts in BASE_DIR directory ',
)
parser.add_argument(
'--chdir', type=check_is_directory,
help='If dir-policy option is set to custom, than this option determines script execution directory.',
)
@signalcommand
def handle(self, *args, **options):
from django.conf import settings
NOTICE = self.style.SQL_TABLE
NOTICE2 = self.style.SQL_FIELD
ERROR = self.style.ERROR
ERROR2 = self.style.NOTICE
subdirs = []
scripts = options['script']
if not options.get('noscripts'):
subdirs.append('scripts')
if options.get('infixtures'):
subdirs.append('fixtures')
verbosity = int(options.get('verbosity', 1))
show_traceback = options.get('traceback', False)
no_traceback = options.get('no_traceback', False)
if no_traceback:
show_traceback = False
else:
show_traceback = True
silent = options.get('silent', False)
if silent:
verbosity = 0
email_notifications = options.get('email_notifications', False)
if len(subdirs) < 1:
print(NOTICE("No subdirs to run left."))
return
if len(scripts) < 1:
print(ERROR("Script name required."))
return
def get_directory_from_chdir():
directory = options.get('chdir') or getattr(settings, 'RUNSCRIPT_CHDIR', None)
try:
check_is_directory(directory)
except ArgumentTypeError as e:
raise BadCustomDirectoryException(str(e))
return directory
def get_directory_basing_on_policy(script_module):
policy = options.get('dir_policy') or getattr(settings, 'RUNSCRIPT_CHDIR_POLICY', DirPolicyChoices.NONE)
if policy == DirPolicyChoices.ROOT:
return settings.BASE_DIR
elif policy == DirPolicyChoices.EACH:
return os.path.dirname(inspect.getfile(script_module))
else:
return self.current_directory
def set_directory(script_module):
if options.get('chdir'):
directory = get_directory_from_chdir()
elif options.get('dir_policy'):
directory = get_directory_basing_on_policy(script_module)
elif getattr(settings, 'RUNSCRIPT_CHDIR', None):
directory = get_directory_from_chdir()
else:
directory = get_directory_basing_on_policy(script_module)
os.chdir(os.path.abspath(directory))
def run_script(mod, *script_args):
try:
set_directory(mod)
mod.run(*script_args)
if email_notifications:
self.send_email_notification(notification_id=mod.__name__)
except Exception as e:
if silent:
return
if verbosity > 0:
print(ERROR("Exception while running run() in '%s'" % mod.__name__))
if email_notifications:
self.send_email_notification(
notification_id=mod.__name__, include_traceback=True)
if show_traceback:
if not isinstance(e, CommandError):
raise
def my_import(parent_package, module_name):
full_module_path = "%s.%s" % (parent_package, module_name)
if verbosity > 1:
print(NOTICE("Check for %s" % full_module_path))
# Try importing the parent package first
try:
importlib.import_module(parent_package)
except ImportError as e:
if str(e).startswith('No module named'):
# No need to proceed if the parent package doesn't exist
return False
try:
t = importlib.import_module(full_module_path)
except ImportError as e:
# The parent package exists, but the module doesn't
module_file = os.path.join(settings.BASE_DIR, *full_module_path.split('.')) + '.py'
if not os.path.isfile(module_file):
return False
if silent:
return False
if show_traceback:
traceback.print_exc()
if verbosity > 0:
print(ERROR("Cannot import module '%s': %s." % (full_module_path, e)))
return False
if hasattr(t, "run"):
if verbosity > 1:
print(NOTICE2("Found script '%s' ..." % full_module_path))
return t
else:
if verbosity > 1:
print(ERROR2("Found script '%s' but no run() function found." % full_module_path))
def find_modules_for_script(script):
""" find script module which contains 'run' attribute """
modules = []
# first look in apps
for app in apps.get_app_configs():
for subdir in subdirs:
mod = my_import("%s.%s" % (app.name, subdir), script)
if mod:
modules.append(mod)
# try direct import
if script.find(".") != -1:
parent, mod_name = script.rsplit(".", 1)
mod = my_import(parent, mod_name)
if mod:
modules.append(mod)
else:
# try app.DIR.script import
for subdir in subdirs:
mod = my_import(subdir, script)
if mod:
modules.append(mod)
return modules
if options.get('script_args'):
script_args = options['script_args']
else:
script_args = []
for script in scripts:
modules = find_modules_for_script(script)
if not modules:
if verbosity > 0 and not silent:
print(ERROR("No (valid) module for script '%s' found" % script))
if verbosity < 2:
print(ERROR("Try running with a higher verbosity level like: -v2 or -v3"))
for mod in modules:
if verbosity > 1:
print(NOTICE2("Running script '%s' ..." % mod.__name__))
run_script(mod, *script_args)
| 38.313808 | 116 | 0.561865 |
ace9bd82e891c6311bc020244eef6a69e3a05c0c | 350 | py | Python | tests/unittest/test_app.py | vyahello/fake-cars-api | 13c7325a7d8779d4b2e5ce60d5664b843c891cb6 | [
"MIT"
] | null | null | null | tests/unittest/test_app.py | vyahello/fake-cars-api | 13c7325a7d8779d4b2e5ce60d5664b843c891cb6 | [
"MIT"
] | 3 | 2019-11-22T20:56:17.000Z | 2021-09-15T08:18:30.000Z | tests/unittest/test_app.py | vyahello/fake-vehicles-api | 13c7325a7d8779d4b2e5ce60d5664b843c891cb6 | [
"MIT"
] | null | null | null | from apistar import Route, App
from api.app import ROUTES, api_app
def test_count_routes() -> None:
assert len(ROUTES) == 7
def test_first_route_type() -> None:
assert isinstance(ROUTES[0], Route)
def test_last_route_type() -> None:
assert isinstance(ROUTES[-1], Route)
def test_app() -> None:
assert isinstance(api_app, App)
| 18.421053 | 40 | 0.702857 |
ace9bd877b4e7e7b1450c38c877124028008a1b8 | 27,960 | py | Python | src/python/dxpy/bindings/dxfile_functions.py | yesimon/dx-toolkit | c13a16d570a55bde7778d6db9268f5c3fca81d0f | [
"Apache-2.0"
] | null | null | null | src/python/dxpy/bindings/dxfile_functions.py | yesimon/dx-toolkit | c13a16d570a55bde7778d6db9268f5c3fca81d0f | [
"Apache-2.0"
] | null | null | null | src/python/dxpy/bindings/dxfile_functions.py | yesimon/dx-toolkit | c13a16d570a55bde7778d6db9268f5c3fca81d0f | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Helper Functions
****************
The following helper functions are useful shortcuts for interacting with File objects.
'''
from __future__ import print_function, unicode_literals, division, absolute_import
import os, sys, math, mmap, stat
import hashlib
import traceback
import warnings
from collections import defaultdict
import multiprocessing
import dxpy
from .. import logger
from . import dxfile, DXFile
from .dxfile import FILE_REQUEST_TIMEOUT
from ..compat import open, USING_PYTHON2
from ..exceptions import DXFileError, DXPartLengthMismatchError, DXChecksumMismatchError, DXIncompleteReadsError, err_exit
from ..utils import response_iterator
import subprocess
def open_dxfile(dxid, project=None, mode=None, read_buffer_size=dxfile.DEFAULT_BUFFER_SIZE):
    '''
    :param dxid: file ID
    :type dxid: string
    :param project: project to use as context for the read (may be None)
    :param mode: open mode forwarded to :class:`~dxpy.bindings.dxfile.DXFile`
    :param read_buffer_size: read buffer size in bytes
    :rtype: :class:`~dxpy.bindings.dxfile.DXFile`

    Convenience wrapper: given the object ID of an uploaded file, return a
    remote file handler that behaves like a Python file object.

    Example::

      with open_dxfile("file-xxxx") as fd:
          for line in fd:
              ...
    '''
    remote_handler = DXFile(dxid, project=project, mode=mode,
                            read_buffer_size=read_buffer_size)
    return remote_handler
def new_dxfile(mode=None, write_buffer_size=dxfile.DEFAULT_BUFFER_SIZE, expected_file_size=None, file_is_mmapd=False,
               **kwargs):
    '''
    :param mode: One of "w" or "a" for write and append modes, respectively
    :type mode: string
    :param write_buffer_size: write buffer size in bytes
    :param expected_file_size: hint of the final file size
    :param file_is_mmapd: whether the source data is memory-mapped
    :rtype: :class:`~dxpy.bindings.dxfile.DXFile`

    Additional optional parameters not listed: all those under
    :func:`dxpy.bindings.DXDataObject.new`.

    Convenience wrapper: create a new remote file object and return a
    writable file-like handler for it.

    Example::

      with new_dxfile(media_type="application/json") as fd:
          fd.write("foo\\n")
    '''
    handler = DXFile(mode=mode,
                     write_buffer_size=write_buffer_size,
                     expected_file_size=expected_file_size,
                     file_is_mmapd=file_is_mmapd)
    handler.new(**kwargs)
    return handler
def download_dxfile(dxid, filename, chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False, show_progress=False,
                    project=None, **kwargs):
    '''
    :param dxid: DNAnexus file ID or DXFile (file handler) object
    :type dxid: string or DXFile
    :param filename: Local filename
    :type filename: string
    :param append: If True, appends to the local file (default is to truncate local file if it exists)
    :type append: boolean
    :param project: project to use as context for this download (may affect
        which billing account is billed for this download). If None or
        DXFile.NO_PROJECT_HINT, no project hint is supplied to the API server.
    :type project: str or None

    Downloads the remote file referenced by *dxid* and saves it to *filename*,
    retrying on retriable errors until _download_dxfile reports completion.

    Example::

        download_dxfile("file-xxxx", "localfilename.fastq")
    '''
    # Each part may be retried up to 3 times on retriable failures.
    retries_per_part = defaultdict(lambda: 3)
    finished = False
    while not finished:
        finished = _download_dxfile(dxid, filename, retries_per_part,
                                    chunksize=chunksize, append=append,
                                    show_progress=show_progress,
                                    project=project, **kwargs)
# Check if a program (wget, curl, etc.) is on the path, and
# can be called.
def _which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# Caluclate the md5 checkum for [filename], and raise
# an exception if the checksum is wrong.
def _verify(filename, md5digest):
    """Check the on-disk file against the expected md5 hex digest using the
    system md5sum utility; exits the process (err_exit) on any failure."""
    checksum_tool = _which("md5sum")
    if checksum_tool is None:
        err_exit("md5sum is not installed on this system")
    cmd = [checksum_tool, "-b", filename]
    try:
        print("Calculating checksum")
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        err_exit("Failed to run md5sum: " + str(cmd))

    fields = output.strip().split()
    if len(fields) != 2:
        err_exit("md5sum returned weird results: " + str(fields))
    observed_md5 = fields[0]
    # python-3 : both digests have to be in bytes before comparing
    md5digest = md5digest.encode("ascii")

    if observed_md5 != md5digest:
        err_exit("Checksum doesn't match " + str(observed_md5) + " expected:" + str(md5digest))
    print("Checksum correct")
# [dxid] is a symbolic link. Create a preauthenticated URL,
# and download it
def _download_symbolic_link(dxid, md5digest, project, dest_filename):
    """Download the symbolic-link file *dxid* to *dest_filename* via a
    preauthenticated URL, using aria2c when available (parallel connections)
    and falling back to wget. If *md5digest* is given, verify the result.

    :param dxid: DNAnexus file ID of the symbolic link
    :param md5digest: expected md5 hex digest, or None to skip verification
    :param project: project context for generating the download URL
    :param dest_filename: local path to write the downloaded data to
    """
    dxfile = dxpy.DXFile(dxid)
    url, _headers = dxfile.get_download_url(preauthenticated=True,
                                            duration=6*3600,
                                            project=project)

    # Follow the redirection
    print('Following redirect for ' + url)

    # Check if aria2 present
    # Use that instead of wget
    aria2c_exe = _which("aria2c")
    if aria2c_exe is None:
        wget_exe = _which("wget")
        if wget_exe is None:
            err_exit("wget is not installed on this system")
        cmd = ["wget", "--tries=5", "--quiet"]
        # BUG FIX: previously this tested os.path.isfile(dxid) -- dxid is a
        # file ID string, never a local path, so --continue was never used.
        # Resuming must be decided by the partially-downloaded destination.
        if os.path.isfile(dest_filename):
            # destination file already exists, resume download.
            cmd += ["--continue"]
        cmd += ["-O", dest_filename, url]
    else:
        print("aria2c found in path so using that instead of wget \n")
        # aria2c does not allow more than 16 connections per server
        max_connections = min(16, multiprocessing.cpu_count())
        cmd = ["aria2c", "--check-certificate=false", "-s", str(max_connections), "-x", str(max_connections)]
        # Split path properly for aria2c
        # If '-d' arg not provided, aria2c uses current working directory
        cwd = os.getcwd()
        directory, filename = os.path.split(dest_filename)
        directory = cwd if directory in ["", cwd] else directory
        cmd += ["-o", filename, "-d", os.path.abspath(directory), url]

    try:
        if aria2c_exe is not None:
            print("Downloading symbolic link with aria2c")
        else:
            print("Downloading symbolic link with wget")
        # shell=False (list form): the URL/path are never shell-interpreted.
        subprocess.check_call(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        msg = ""
        if e and e.output:
            msg = e.output.strip()
        err_exit("Failed to call download: {cmd}\n{msg}\n".format(cmd=str(cmd), msg=msg))

    if md5digest is not None:
        _verify(dest_filename, md5digest)
def _download_dxfile(dxid, filename, part_retry_counter,
chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False, show_progress=False,
project=None, **kwargs):
'''
Core of download logic. Download file-id *dxid* and store it in
a local file *filename*.
The return value is as follows:
- True means the download was successfully completed
- False means the download was stopped because of a retryable error
- Exception raised for other errors
'''
def print_progress(bytes_downloaded, file_size, action="Downloaded"):
num_ticks = 60
effective_file_size = file_size or 1
if bytes_downloaded > effective_file_size:
effective_file_size = bytes_downloaded
ticks = int(round((bytes_downloaded / float(effective_file_size)) * num_ticks))
percent = int(math.floor((bytes_downloaded / float(effective_file_size)) * 100))
fmt = "[{done}{pending}] {action} {done_bytes:,}{remaining} bytes ({percent}%) {name}"
# Erase the line and return the cursor to the start of the line.
# The following VT100 escape sequence will erase the current line.
sys.stderr.write("\33[2K")
sys.stderr.write(fmt.format(action=action,
done=("=" * (ticks - 1) + ">") if ticks > 0 else "",
pending=" " * (num_ticks - ticks),
done_bytes=bytes_downloaded,
remaining=" of {size:,}".format(size=file_size) if file_size else "",
percent=percent,
name=filename))
sys.stderr.flush()
sys.stderr.write("\r")
sys.stderr.flush()
_bytes = 0
if isinstance(dxid, DXFile):
dxfile = dxid
else:
dxfile = DXFile(dxid, mode="r", project=(project if project != DXFile.NO_PROJECT_HINT else None))
dxfile_desc = dxfile.describe(fields={"parts"}, default_fields=True, **kwargs)
from pprint import pprint
if 'drive' in dxfile_desc:
# A symbolic link. Get the MD5 checksum, if we have it
if 'md5' in dxfile_desc:
md5 = dxfile_desc['md5']
else:
md5 = None
_download_symbolic_link(dxid, md5, project, filename)
return True
parts = dxfile_desc["parts"]
parts_to_get = sorted(parts, key=int)
file_size = dxfile_desc.get("size")
offset = 0
for part_id in parts_to_get:
parts[part_id]["start"] = offset
offset += parts[part_id]["size"]
if append:
fh = open(filename, "ab")
else:
try:
fh = open(filename, "rb+")
except IOError:
fh = open(filename, "wb")
if show_progress:
print_progress(0, None)
def get_chunk(part_id_to_get, start, end):
url, headers = dxfile.get_download_url(project=project, **kwargs)
# If we're fetching the whole object in one shot, avoid setting the Range header to take advantage of gzip
# transfer compression
sub_range = False
if len(parts) > 1 or (start > 0) or (end - start + 1 < parts[part_id_to_get]["size"]):
sub_range = True
data = dxpy._dxhttp_read_range(url, headers, start, end, FILE_REQUEST_TIMEOUT, sub_range)
return part_id_to_get, data
def chunk_requests():
for part_id_to_chunk in parts_to_get:
part_info = parts[part_id_to_chunk]
for chunk_start in range(part_info["start"], part_info["start"] + part_info["size"], chunksize):
chunk_end = min(chunk_start + chunksize, part_info["start"] + part_info["size"]) - 1
yield get_chunk, [part_id_to_chunk, chunk_start, chunk_end], {}
def verify_part(_part_id, got_bytes, hasher):
if got_bytes is not None and got_bytes != parts[_part_id]["size"]:
msg = "Unexpected part data size in {} part {} (expected {}, got {})"
msg = msg.format(dxfile.get_id(), _part_id, parts[_part_id]["size"], got_bytes)
raise DXPartLengthMismatchError(msg)
if hasher is not None and "md5" not in parts[_part_id]:
warnings.warn("Download of file {} is not being checked for integrity".format(dxfile.get_id()))
elif hasher is not None and hasher.hexdigest() != parts[_part_id]["md5"]:
msg = "Checksum mismatch in {} part {} (expected {}, got {})"
msg = msg.format(dxfile.get_id(), _part_id, parts[_part_id]["md5"], hasher.hexdigest())
raise DXChecksumMismatchError(msg)
with fh:
last_verified_pos = 0
if fh.mode == "rb+":
# We already downloaded the beginning of the file, verify that the
# chunk checksums match the metadata.
last_verified_part, max_verify_chunk_size = None, 1024*1024
try:
for part_id in parts_to_get:
part_info = parts[part_id]
if "md5" not in part_info:
raise DXFileError("File {} does not contain part md5 checksums".format(dxfile.get_id()))
bytes_to_read = part_info["size"]
hasher = hashlib.md5()
while bytes_to_read > 0:
chunk = fh.read(min(max_verify_chunk_size, bytes_to_read))
if len(chunk) < min(max_verify_chunk_size, bytes_to_read):
raise DXFileError("Local data for part {} is truncated".format(part_id))
hasher.update(chunk)
bytes_to_read -= max_verify_chunk_size
if hasher.hexdigest() != part_info["md5"]:
raise DXFileError("Checksum mismatch when verifying downloaded part {}".format(part_id))
else:
last_verified_part = part_id
last_verified_pos = fh.tell()
if show_progress:
_bytes += part_info["size"]
print_progress(_bytes, file_size, action="Verified")
except (IOError, DXFileError) as e:
logger.debug(e)
fh.seek(last_verified_pos)
fh.truncate()
if last_verified_part is not None:
del parts_to_get[:parts_to_get.index(last_verified_part)+1]
if show_progress and len(parts_to_get) < len(parts):
print_progress(last_verified_pos, file_size, action="Resuming at")
logger.debug("Verified %s/%d downloaded parts", last_verified_part, len(parts_to_get))
try:
# Main loop. In parallel: download chunks, verify them, and write them to disk.
get_first_chunk_sequentially = (file_size > 128 * 1024 and last_verified_pos == 0 and dxpy.JOB_ID)
cur_part, got_bytes, hasher = None, None, None
for chunk_part, chunk_data in response_iterator(chunk_requests(),
dxfile._http_threadpool,
do_first_task_sequentially=get_first_chunk_sequentially):
if chunk_part != cur_part:
verify_part(cur_part, got_bytes, hasher)
cur_part, got_bytes, hasher = chunk_part, 0, hashlib.md5()
got_bytes += len(chunk_data)
hasher.update(chunk_data)
fh.write(chunk_data)
if show_progress:
_bytes += len(chunk_data)
print_progress(_bytes, file_size)
verify_part(cur_part, got_bytes, hasher)
if show_progress:
print_progress(_bytes, file_size, action="Completed")
except DXFileError:
print(traceback.format_exc(), file=sys.stderr)
part_retry_counter[cur_part] -= 1
if part_retry_counter[cur_part] > 0:
print("Retrying {} ({} tries remain for part {})".format(dxfile.get_id(), part_retry_counter[cur_part], cur_part),
file=sys.stderr)
return False
raise
if show_progress:
sys.stderr.write("\n")
return True
def upload_local_file(filename=None, file=None, media_type=None, keep_open=False,
                      wait_on_close=False, use_existing_dxfile=None, show_progress=False,
                      write_buffer_size=None, multithread=True, **kwargs):
    '''
    :param filename: Local filename
    :type filename: string
    :param file: File-like object
    :type file: File-like object
    :param media_type: Internet Media Type
    :type media_type: string
    :param keep_open: If False, closes the file after uploading
    :type keep_open: boolean
    :param write_buffer_size: Buffer size to use for upload
    :type write_buffer_size: int
    :param wait_on_close: If True, waits for the file to close
    :type wait_on_close: boolean
    :param use_existing_dxfile: Instead of creating a new file object, upload to the specified file
    :type use_existing_dxfile: :class:`~dxpy.bindings.dxfile.DXFile`
    :param multithread: If True, sends multiple write requests asynchronously
    :type multithread: boolean
    :returns: Remote file handler
    :rtype: :class:`~dxpy.bindings.dxfile.DXFile`

    Additional optional parameters not listed: all those under
    :func:`dxpy.bindings.DXDataObject.new`.

    Exactly one of *filename* or *file* is required.

    Uploads *filename* or reads from *file* into a new file object (with
    media type *media_type* if given) and returns the associated remote
    file handler. The "name" property of the newly created remote file
    is set to the basename of *filename* or to *file.name* (if it
    exists).

    Examples::

        # Upload from a path
        dxpy.upload_local_file("/home/ubuntu/reads.fastq.gz")
        # Upload from a file-like object
        with open("reads.fastq") as fh:
            dxpy.upload_local_file(file=fh)
    '''
    fd = file if filename is None else open(filename, 'rb')

    # Determine the size of the input when possible. File-like objects
    # without a real descriptor (e.g. StringIO) raise AttributeError from
    # fileno(); fstat itself may raise OSError on exotic descriptors. In
    # both cases fall back to 0 ("size unknown"). Narrowed from a bare
    # except: so that unrelated errors (e.g. KeyboardInterrupt) propagate.
    try:
        file_size = os.fstat(fd.fileno()).st_size
    except (AttributeError, OSError):
        file_size = 0

    # Hint passed to the server-side chunking logic: mmap-based reads are
    # only attempted for objects backed by a real file descriptor.
    file_is_mmapd = hasattr(fd, "fileno")

    if write_buffer_size is None:
        write_buffer_size = dxfile.DEFAULT_BUFFER_SIZE

    if use_existing_dxfile:
        handler = use_existing_dxfile
    else:
        # Set a reasonable name for the file if none has been set
        # already.
        creation_kwargs = kwargs.copy()
        if 'name' not in kwargs:
            if filename is not None:
                creation_kwargs['name'] = os.path.basename(filename)
            else:
                # Try to get a filename from the file-like object.
                try:
                    local_file_name = file.name
                except AttributeError:
                    pass
                else:
                    creation_kwargs['name'] = os.path.basename(local_file_name)

        # Use 'a' mode because we will be responsible for closing the file
        # ourselves later (if requested).
        handler = new_dxfile(mode='a', media_type=media_type, write_buffer_size=write_buffer_size,
                             expected_file_size=file_size, file_is_mmapd=file_is_mmapd, **creation_kwargs)

    # For subsequent API calls, don't supply the dataobject metadata
    # parameters that are only needed at creation time.
    _, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs)

    num_ticks = 60
    offset = 0

    handler._ensure_write_bufsize(**remaining_kwargs)

    def can_be_mmapd(fd):
        # Character devices and FIFOs (e.g. stdin) cannot be mmap'd.
        if not hasattr(fd, "fileno"):
            return False
        mode = os.fstat(fd.fileno()).st_mode
        return not (stat.S_ISCHR(mode) or stat.S_ISFIFO(mode))

    def read(num_bytes):
        """
        Returns a string or mmap'd data containing the next num_bytes of
        the file, or up to the end if there are fewer than num_bytes
        left.
        """
        # If file cannot be mmap'd (e.g. is stdin, or a fifo), fall back
        # to doing an actual read from the file.
        if not can_be_mmapd(fd):
            return fd.read(handler._write_bufsize)

        bytes_available = max(file_size - offset, 0)
        if bytes_available == 0:
            return b""

        # Zero-copy view of the next window; 'offset' is advanced by the
        # caller (the upload loop below).
        return mmap.mmap(fd.fileno(), min(handler._write_bufsize, bytes_available), offset=offset,
                         access=mmap.ACCESS_READ)

    handler._num_bytes_transmitted = 0

    def report_progress(handler, num_bytes):
        # Render a 60-tick progress bar on stderr; no-op when the total
        # size is unknown (file_size == 0).
        handler._num_bytes_transmitted += num_bytes
        if file_size > 0:
            ticks = int(round((handler._num_bytes_transmitted / float(file_size)) * num_ticks))
            percent = int(round((handler._num_bytes_transmitted / float(file_size)) * 100))

            fmt = "[{done}{pending}] Uploaded {done_bytes:,} of {total:,} bytes ({percent}%) {name}"
            sys.stderr.write(fmt.format(done='=' * (ticks - 1) + '>' if ticks > 0 else '',
                                        pending=' ' * (num_ticks - ticks),
                                        done_bytes=handler._num_bytes_transmitted,
                                        total=file_size,
                                        percent=percent,
                                        name=filename if filename is not None else ''))
            sys.stderr.flush()
            sys.stderr.write("\r")
            sys.stderr.flush()

    if show_progress:
        report_progress(handler, 0)

    # Main upload loop: read a buffer, hand it to the handler (which may
    # dispatch multiple asynchronous write requests when multithread=True).
    while True:
        buf = read(handler._write_bufsize)
        offset += len(buf)

        if len(buf) == 0:
            break

        handler.write(buf,
                      report_progress_fn=report_progress if show_progress else None,
                      multithread=multithread,
                      **remaining_kwargs)

    # Only close descriptors we opened ourselves; a caller-supplied 'file'
    # stays open.
    if filename is not None:
        fd.close()

    handler.flush(report_progress_fn=report_progress if show_progress else None, **remaining_kwargs)

    if show_progress:
        sys.stderr.write("\n")
        sys.stderr.flush()

    if not keep_open:
        handler.close(block=wait_on_close, report_progress_fn=report_progress if show_progress else None,
                      **remaining_kwargs)

    return handler
def upload_string(to_upload, media_type=None, keep_open=False, wait_on_close=False, **kwargs):
    """
    :param to_upload: String to upload into a file
    :type to_upload: string
    :param media_type: Internet Media Type
    :type media_type: string
    :param keep_open: If False, closes the file after uploading
    :type keep_open: boolean
    :param wait_on_close: If True, waits for the file to close
    :type wait_on_close: boolean
    :returns: Remote file handler
    :rtype: :class:`~dxpy.bindings.dxfile.DXFile`

    Additional optional parameters not listed: all those under
    :func:`dxpy.bindings.DXDataObject.new`.

    Writes the contents of *to_upload* into a freshly created remote file
    object (tagged with *media_type* when given) and returns its handler.
    """
    # Open in 'a' mode: this function owns the handle and closes it below
    # unless the caller asked to keep it open.
    handler = new_dxfile(mode='a', media_type=media_type, **kwargs)

    # Creation-time metadata parameters must not be forwarded to the
    # follow-up API calls; keep only the rest.
    _, call_kwargs = dxpy.DXDataObject._get_creation_params(kwargs)

    handler.write(to_upload, **call_kwargs)
    if not keep_open:
        handler.close(block=wait_on_close, **call_kwargs)
    return handler
def list_subfolders(project, path, recurse=True):
    '''
    :param project: Project ID to use as context for the listing
    :type project: string
    :param path: Subtree root path
    :type path: string
    :param recurse: Return a complete subfolders tree
    :type recurse: boolean

    Returns a generator over the folders of the *project* that lie under
    the remote *path* (the *path* folder itself is included in the result).

    Example::

        list_subfolders("project-xxxx", "/input")
    '''
    all_folders = dxpy.get_handler(project).describe(input_params={'folders': True})['folders']
    # TODO: support shell-style path globbing (i.e. /a*/c matches /ab/c but not /a/b/c)
    # return pathmatch.filter(all_folders, os.path.join(path, '*'))
    if not recurse:
        # Immediate children only: no further '/' past the path prefix.
        return (f for f in all_folders if f.startswith(path) and '/' not in f[len(path) + 1:])
    return (f for f in all_folders if f.startswith(path))
def download_folder(project, destdir, folder="/", overwrite=False, chunksize=dxfile.DEFAULT_BUFFER_SIZE,
                    show_progress=False, **kwargs):
    '''
    :param project: Project ID to use as context for this download.
    :type project: string
    :param destdir: Local destination location
    :type destdir: string
    :param folder: Path to the remote folder to download
    :type folder: string
    :param overwrite: Overwrite existing files
    :type overwrite: boolean

    Recreates the tree of the remote *folder* of *project* below the local
    directory *destdir* and downloads every closed file found in it.

    Example::

        download_folder("project-xxxx", "/home/jsmith/input", folder="/input")
    '''
    def ensure_local_dir(d):
        # Create d (with parents) unless it already exists as a directory.
        if os.path.isdir(d):
            return
        if os.path.exists(d):
            raise DXFileError("Destination location '{}' already exists and is not a directory".format(d))
        logger.debug("Creating destination directory: '%s'", d)
        os.makedirs(d)

    def compose_local_dir(d, remote_folder, remote_subfolder):
        # Map a remote subfolder path onto the local destination tree.
        if remote_folder == "/":
            suffix = remote_subfolder[1:]
        else:
            suffix = remote_subfolder[len(remote_folder) + 1:]
        if os.sep != '/':
            suffix = suffix.replace('/', os.sep)
        return os.path.join(d, suffix) if suffix != "" else d

    remote_root = folder.strip()
    if remote_root.endswith("/") and remote_root != "/":
        remote_root = remote_root[:-1]
    if remote_root == "":
        raise DXFileError("Invalid remote folder name: '{}'".format(folder))

    local_root = os.path.normpath(destdir).strip()
    if local_root == "":
        raise DXFileError("Invalid destination directory name: '{}'".format(destdir))

    # Recreate the remote folder tree locally.
    remote_folders = sorted(list_subfolders(project, remote_root, recurse=True))
    if not remote_folders:
        raise DXFileError("Remote folder '{}' not found".format(remote_root))
    for remote_subfolder in remote_folders:
        ensure_local_dir(compose_local_dir(local_root, remote_root, remote_subfolder))

    # Download the files. Stream the listing one entry at a time instead of
    # materializing it, because there could be many files.
    describe_input = dict(fields=dict(folder=True, name=True, id=True))
    files_gen = dxpy.search.find_data_objects(classname='file', state='closed', project=project,
                                              folder=remote_root, recurse=True, describe=describe_input)
    if files_gen is None:
        # In python 3, the generator can be None, and iterating on it
        # will cause an error.
        return

    # Now it is safe, in both python 2 and 3, to iterate on the generator.
    for remote_file in files_gen:
        desc = remote_file['describe']
        local_filename = os.path.join(compose_local_dir(local_root, remote_root, desc['folder']),
                                      desc['name'])
        if os.path.exists(local_filename) and not overwrite:
            raise DXFileError(
                "Destination file '{}' already exists but no overwrite option is provided".format(local_filename)
            )
        logger.debug("Downloading '%s/%s' remote file to '%s' location",
                     ("" if desc['folder'] == "/" else desc['folder']),
                     desc['name'],
                     local_filename)
        download_dxfile(desc['id'], local_filename, chunksize=chunksize, project=project,
                        show_progress=show_progress, **kwargs)
| 40.758017 | 130 | 0.628755 |
ace9bd8a7f9e0b76aa7e95a2f7c40b0ef1661e1e | 193 | py | Python | results/urls.py | qritwik/vtu | 0172549db5cddc4bea7a43b65856df72cc986f15 | [
"BSD-3-Clause"
] | 3 | 2019-02-26T19:19:40.000Z | 2021-05-01T10:34:15.000Z | results/urls.py | qritwik/vtu | 0172549db5cddc4bea7a43b65856df72cc986f15 | [
"BSD-3-Clause"
] | 10 | 2019-03-07T10:47:23.000Z | 2022-03-11T23:40:51.000Z | results/urls.py | qritwik/vtu-results-api | 0172549db5cddc4bea7a43b65856df72cc986f15 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url
from results import views
urlpatterns = [
    # Fixed endpoint; presumably takes USN/semester from the request body
    # (see views.get_result_post) — TODO confirm against the view.
    url(r'^vtu/$', views.get_result_post),
    # USN and semester captured directly from the URL path.
    url(r'^(?P<usn>[\w\-]+)/(?P<sem>[\w\-]+)/$', views.get_result_get),
]
| 24.125 | 71 | 0.621762 |
ace9be8119d1983ee460fe960e19512bb8f709e1 | 7,890 | py | Python | Offline2/Download.py | flychensc/chives | 04e56d19b1724fc7dbb486d5b11f8397e0ff8417 | [
"Apache-2.0"
] | null | null | null | Offline2/Download.py | flychensc/chives | 04e56d19b1724fc7dbb486d5b11f8397e0ff8417 | [
"Apache-2.0"
] | null | null | null | Offline2/Download.py | flychensc/chives | 04e56d19b1724fc7dbb486d5b11f8397e0ff8417 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
__author__ = 'Shawn Chen'
import gevent.monkey
gevent.monkey.patch_all()
import sys
sys.path.append(".")
import logging
import datetime
import socket
import sqlalchemy
import tushare as ts
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
import Setting
from gevent.queue import Queue
from gevent.pool import Group
import gevent.monkey
logger = logging.getLogger("chives.Offline2.Download")

# Single shared SQLAlchemy engine for this module.
engine = create_engine(Setting.SQL_URL, echo=False)

# Download history starting roughly six months before today.
START_DATE = (datetime.date.today()-datetime.timedelta(days=30*6)).strftime("%Y-%m-%d")

# Explicit SQL column types for the tables written by this module.
sql_type = {
    "stock_basics": {
        'code': sqlalchemy.CHAR(6),            # stock code
        'name': sqlalchemy.CHAR(8),            # stock name
        'industry': sqlalchemy.CHAR(8),        # industry
        'area': sqlalchemy.CHAR(8),            # region
        'pe': sqlalchemy.FLOAT,                # price/earnings ratio
        'outstanding': sqlalchemy.FLOAT,       # tradable shares
        'totals': sqlalchemy.FLOAT,            # total shares (x10k)
        'totalAssets': sqlalchemy.FLOAT,       # total assets (x10k)
        'liquidAssets': sqlalchemy.FLOAT,      # liquid assets
        'fixedAssets': sqlalchemy.FLOAT,       # fixed assets
        'reserved': sqlalchemy.FLOAT,          # capital reserve
        'reservedPerShare': sqlalchemy.FLOAT,  # capital reserve per share
        'eps': sqlalchemy.FLOAT,               # earnings per share
        'bvps': sqlalchemy.FLOAT,              # book value per share
        'pb': sqlalchemy.FLOAT,                # price/book ratio
        'timeToMarket': sqlalchemy.DATE,       # listing date
    },
    "history": {
        'code': sqlalchemy.CHAR(6),    # stock code
        'date': sqlalchemy.DATE,       # trade date
        'open': sqlalchemy.FLOAT,      # opening price
        'high': sqlalchemy.FLOAT,      # highest price
        'close': sqlalchemy.FLOAT,     # closing price
        'low': sqlalchemy.FLOAT,       # lowest price
        'volume': sqlalchemy.INTEGER,  # trade volume
    },
}
def _boss(boss_q, worker_q, product_q, his_list):
    """
    Coordinator greenlet.

    Pops ``[status, (stock_id, info)]`` entries off ``boss_q``; jobs whose
    status is ``"ok"`` or ``"timeout"`` are (re)queued on ``worker_q`` for a
    worker to process, any other status counts as a permanent failure.
    Loops until every stock is either finished (its frame sits on
    ``product_q``) or defective, then drains ``product_q`` into ``his_list``.
    """
    # 1. Iterate over all stocks.
    defective = 0
    raw_total = boss_q.qsize()
    logger.info(u"共%d支股票需要处理" % raw_total)
    while raw_total != product_q.qsize() + defective:
        if not boss_q.empty():
            [status, (stock_id, info)] = boss_q.get()
            logger.debug(u"%s:%s %s" % (status, stock_id, info['name']))
            if status == "ok" or status == "timeout":
                # Recoverable: hand the job (back) to a worker.
                worker_q.put((stock_id, info))
            else:
                # Anything else (an exception object) is a permanent failure.
                defective += 1
            # Yield cooperatively so workers get a chance to run.
            gevent.sleep(0)
        else:
            logger.info(u"已处理%d/%d支股票" % (product_q.qsize(), raw_total))
            logger.debug(u"Boss闲置中, boss_q.qsize:%d, worker_q.qsize:%d, product_q.qsize:%d"
                         % (boss_q.qsize(), worker_q.qsize(), product_q.qsize()))
            # Nothing to dispatch right now; back off.
            gevent.sleep(5)
    logger.info(u"共%d支股票完成处理" % product_q.qsize())

    # Collect the downloaded history frames.
    while not product_q.empty():
        his_list.append(product_q.get())
    logger.debug(u"Boss收工")
def _worker(boss_q, worker_q, product_q, no):
    """
    Worker greenlet number *no*.

    Pulls ``(stock_id, info)`` jobs from ``worker_q``, downloads the stock's
    K-line history via tushare, normalizes the DataFrame (float columns,
    (code, date) MultiIndex) and pushes it onto ``product_q``.  Failures are
    reported back to the boss through ``boss_q`` with a status of
    ``"timeout"`` (retryable) or the exception itself (permanent).
    """
    while not worker_q.empty() or not boss_q.empty() or not product_q.empty():
        if not worker_q.empty():
            try:
                (stock_id, info) = worker_q.get(timeout=1)
                logger.debug(u"Worker%d get %s %s" % (no, stock_id, info['name']))
            except gevent.queue.Empty:
                # Another worker grabbed the job first; just loop again.
                logger.debug(u"Worker%d取queue超时" % no)
                continue
            try:
                # Fetch the stock's historical K-line data.
                his_data = ts.get_k_data(stock_id, start=START_DATE)
                # Reverse the row order.
                his_data = his_data.reindex(index=his_data.index[::-1])
                # Convert price/volume columns from string to float.
                his_data['open'] = his_data['open'].apply(lambda x: float(x))
                his_data['high'] = his_data['high'].apply(lambda x: float(x))
                his_data['low'] = his_data['low'].apply(lambda x: float(x))
                his_data['close'] = his_data['close'].apply(lambda x: float(x))
                his_data['volume'] = his_data['volume'].apply(lambda x: float(x))
                his_data['date'] = his_data['date'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date())
                # Re-index by (code, date) as a MultiIndex.
                his_data.index = [his_data['code'], his_data['date']]
                # Remove the now-duplicated 'code'/'date' columns.
                his_data = his_data.drop(['code', 'date'], axis=1)
                logger.debug(u"Worker%d完成股票%s %s的工作" % (no, stock_id, info['name']))
                product_q.put(his_data)
            except socket.timeout:
                # Network timeout: ask the boss to requeue the job.
                logger.warn(u"Worker%d处理股票%s %s捕获异常:%s" % (no, stock_id, info['name'], "timeout"))
                boss_q.put(["timeout", (stock_id, info)])
            except Exception as e:
                # Any other failure is reported as permanent.
                logger.warn(u"Worker%d处理股票%s %s捕获异常:%s" % (no, stock_id, info['name'], e))
                boss_q.put([e, (stock_id, info)])
            # Yield cooperatively after each job.
            gevent.sleep(0)
        else:
            logger.debug(u"Worker%d闲置中, boss_q.qsize:%d, worker_q.qsize:%d, product_q.qsize:%d"
                         % (no, boss_q.qsize(), worker_q.qsize(), product_q.qsize()))
            # No work available; back off.
            gevent.sleep(3)
    logger.debug(u"Worker%d收工" % no)
    pass
def start():
    """
    Refresh the offline data: rewrite the ``stock_basics`` table from
    tushare, then download every stock's recent K-line history concurrently
    (one boss + 10 worker greenlets) and rewrite the ``history`` table.
    """
    logger.info(u"离线数据更新 - Start")

    # Update the basic stock information table.
    with engine.connect() as conn, conn.begin():
        stock_basics = ts.get_stock_basics()
        # START
        if stock_basics['esp'].dtype == np.dtype('float64'):
            # Copy the mis-spelled tushare column 'esp' into 'eps'.
            stock_basics["eps"] = stock_basics["esp"]
        else:
            # Convert 'esp' to float element by element,
            # as I found 'esp' field was '0.147㈡' at Feb.26.2016.
            # It cause SQL server error.
            logger.warn(u"'esp'非浮点类型")
            def _atof(str):
                try:
                    return float(str)
                except ValueError:
                    # Strip the trailing junk character;
                    # I found 'esp' field was '0.000㈣' at Nov.8.2016.
                    return float(str[:-1])
            stock_basics["eps"] = stock_basics["esp"].apply(_atof)
        stock_basics = stock_basics.drop("esp", axis=1)
        # Drop rows whose 'timeToMarket' is zero (not listed yet).
        stock_basics = stock_basics[stock_basics['timeToMarket']!=0]
        # Convert 'timeToMarket' to a date so it maps to the SQL DATE type.
        stock_basics['timeToMarket'] = stock_basics['timeToMarket'].apply(lambda x:datetime.datetime.strptime(str(x), "%Y%m%d").date())
        # END
        stock_basics.to_sql("stock_basics", conn,
                            if_exists='replace',
                            dtype=sql_type["stock_basics"])

    # Update the history data (downloaded concurrently with gevent).
    group = Group()
    worker_q = Queue()
    boss_q = Queue()
    product_q = Queue()
    his_list = list()

    logger.debug(u"装载全部股票代码")
    for stock_id, info in stock_basics.iterrows():
        boss_q.put(["ok", (stock_id, info)])

    group.add(gevent.spawn(_boss,
                           boss_q=boss_q, worker_q=worker_q, product_q=product_q,
                           his_list=his_list))
    for i in range(10):
        group.add(gevent.spawn(_worker,
                               boss_q=boss_q, worker_q=worker_q, product_q=product_q,
                               no=i))
    group.join()

    # Merge the per-stock frames and persist the history table.
    with engine.connect() as conn, conn.begin():
        pd.concat(his_list).to_sql("history", conn,
                                   if_exists='replace',
                                   index_label=['code', 'date'],
                                   dtype=sql_type["history"])

    logger.info(u"离线数据更新 - End")
    pass
if __name__ == "__main__":
    # Create the package root logger. Note: this rebinds the module-global
    # 'logger', so start() logs through the "chives" logger when run as a
    # script.
    logger = logging.getLogger("chives")
    logger.setLevel(logging.DEBUG)

    # Define the log format.
    log_format = logging.Formatter('%(asctime)s %(name)s:%(levelname)s %(message)s')

    # Create a console handler.
    console_handle = logging.StreamHandler()
    console_handle.setLevel(logging.DEBUG)
    console_handle.setFormatter(log_format)

    # (Disabled) handler that would also write the log to a dated file.
    # file_handle = logging.FileHandler(datetime.date.today().strftime('%Y-%m-%d')+".log", 'w')
    # file_handle.setLevel(logging.DEBUG)
    # file_handle.setFormatter(log_format)

    # Register the handlers.
    logger.addHandler(console_handle)
    # logger.addHandler(file_handle)

    start()
    pass
ace9bed59a1a2eff7f799d86b9823ec157cba16e | 1,158 | py | Python | tag/Tag.py | CaptainSpam/autonifty | 66d500b8b7a867882e671aa4cba1b3be1ed0907b | [
"MIT"
] | null | null | null | tag/Tag.py | CaptainSpam/autonifty | 66d500b8b7a867882e671aa4cba1b3be1ed0907b | [
"MIT"
] | null | null | null | tag/Tag.py | CaptainSpam/autonifty | 66d500b8b7a867882e671aa4cba1b3be1ed0907b | [
"MIT"
] | null | null | null | class Tag(object):
'''
A Tag is a single set of instructions to replace a given unparsed tag.
This can be as simple as inserting the current date or the comic's name to
more complex nonsense like parsing a new page (include), determining which
comic to put on a certain request, or crazy regex substitution.
'''
    def __init__(self):
        # Display name of this tag; subclasses presumably override it with
        # their own tag name — TODO confirm against concrete tags.
        self._tagname = "Tag"
    def do_tag(self, match, parser):
        '''
        Does tag stuff. This gets the required match data and returns whatever
        should go in this tag's place. The Parser will know what to do with it.
        '''
        # Base implementation is a no-op (implicitly returns None);
        # concrete tag subclasses override this.
        pass
    def reset_for_day(self):
        '''
        Tags are instanced once per parsing session in the current
        implementation. This method resets anything a tag needs reset when
        starting on a new comic day. Just in case.
        '''
        # Stateless base class: nothing to reset here.
        pass
    def reset_for_page(self):
        '''
        Tags are instanced once per parsing session in the current
        implementation. This method resets anything a tag needs reset when
        starting on a new page that isn't a new comic day.
        '''
        # Stateless base class: nothing to reset here.
        pass
| 35.090909 | 80 | 0.639033 |
ace9bf825856f7eecff70c370fc2a9ec93deb76e | 2,187 | py | Python | pymc3/distributions/simulator.py | andy-vh/pymc3 | eec70e86ddc21889142841c565ab2b182b6cce51 | [
"Apache-2.0"
] | 3 | 2020-10-06T21:07:30.000Z | 2021-03-04T11:40:17.000Z | pymc3/distributions/simulator.py | andy-vh/pymc3 | eec70e86ddc21889142841c565ab2b182b6cce51 | [
"Apache-2.0"
] | null | null | null | pymc3/distributions/simulator.py | andy-vh/pymc3 | eec70e86ddc21889142841c565ab2b182b6cce51 | [
"Apache-2.0"
] | 3 | 2019-09-09T13:09:32.000Z | 2021-09-12T14:37:51.000Z | # Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .distribution import NoDistribution
__all__ = ["Simulator"]
class Simulator(NoDistribution):
    def __init__(self, function, *args, **kwargs):
        """
        This class stores a function defined by the user in python language.

        function: function
            Simulation function defined by the user.
        *args and **kwargs:
            Arguments and keywords arguments that the function takes.
        """
        self.function = function
        # NOTE(review): self.data is read here but never assigned in this
        # __init__; it is presumably attached before this runs by the
        # distribution/observed-data plumbing — confirm against
        # NoDistribution.
        observed = self.data
        super().__init__(shape=np.prod(observed.shape), dtype=observed.dtype, *args, **kwargs)

    def random(self, point=None, size=None):
        """
        Draw random values from Simulator

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        raise NotImplementedError("Not implemented yet")

    def _repr_latex_(self, name=None, dist=None):
        # LaTeX representation used e.g. by Jupyter display hooks.
        if dist is None:
            dist = self
        name = r"\text{%s}" % name
        function = dist.function
        # NOTE(review): 'parameters' and 'sum_stat' are not set in __init__;
        # presumably assigned elsewhere — verify before relying on this repr.
        params = dist.parameters
        sum_stat = dist.sum_stat
        return r"${} \sim \text{{Simulator}}(\mathit{{function}}={},~\mathit{{parameters}}={},~\mathit{{summary statistics}}={})$".format(
            name, function, params, sum_stat
        )
| 34.171875 | 138 | 0.629172 |
ace9bfa4f69187b12df32fefc6ea58a83e952073 | 311 | py | Python | tempdir/midterm_app.py | Arceusir/MIDTERM | 1b8ce1fd1af5e5968466752490c6719f767dbd93 | [
"MIT"
] | null | null | null | tempdir/midterm_app.py | Arceusir/MIDTERM | 1b8ce1fd1af5e5968466752490c6719f767dbd93 | [
"MIT"
] | null | null | null | tempdir/midterm_app.py | Arceusir/MIDTERM | 1b8ce1fd1af5e5968466752490c6719f767dbd93 | [
"MIT"
] | null | null | null | from flask import Flask
from flask import request, render_template
app = Flask(__name__)

@app.route('/')
def index():
    # Root URL serves the login page.
    return render_template("login.html")

@app.route('/register')
def register():
    # Registration page.
    return render_template("register.html")

if __name__ == '__main__':
    # Bind to all interfaces so the dev server is reachable from outside.
    app.run(host="0.0.0.0",port=5000)
ace9c048d16ba2e199606d058124e4919bf4f179 | 17,807 | py | Python | examples/multi_physics/biot_parallel_interactive.py | clazaro/sfepy | 78757a6989d6aaf85a3fb27957b9179c5e2aa2c7 | [
"BSD-3-Clause"
] | null | null | null | examples/multi_physics/biot_parallel_interactive.py | clazaro/sfepy | 78757a6989d6aaf85a3fb27957b9179c5e2aa2c7 | [
"BSD-3-Clause"
] | null | null | null | examples/multi_physics/biot_parallel_interactive.py | clazaro/sfepy | 78757a6989d6aaf85a3fb27957b9179c5e2aa2c7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
r"""
Parallel assembling and solving of a Biot problem (deformable porous medium),
using commands for interactive use.
Find :math:`\ul{u}`, :math:`p` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{\Omega} p\ \alpha_{ij} e_{ij}(\ul{v})
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} q\ \alpha_{ij} e_{ij}(\ul{u})
+ \int_{\Omega} K_{ij} \nabla_i q \nabla_j p
= 0
\;, \quad \forall q \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
Important Notes
---------------
- This example requires petsc4py, mpi4py and (optionally) pymetis with their
dependencies installed!
- This example generates a number of files - do not use an existing non-empty
directory for the ``output_dir`` argument.
- Use the ``--clear`` option with care!
Notes
-----
- Each task is responsible for a subdomain consisting of a set of cells (a cell
region).
- Each subdomain owns PETSc DOFs within a consecutive range.
- When both global and task-local variables exist, the task-local
variables have ``_i`` suffix.
- This example shows how to use a nonlinear solver from PETSc.
- This example can serve as a template for solving a (non)linear multi-field
problem - just replace the equations in :func:`create_local_problem()`.
- The material parameter :math:`\alpha_{ij}` is artificially high to be able to
see the pressure influence on displacements.
- The command line options are saved into <output_dir>/options.txt file.
Usage Examples
--------------
See all options::
$ python examples/multi_physics/biot_parallel_interactive.py -h
See PETSc options::
$ python examples/multi_physics/biot_parallel_interactive.py -help
Single process run useful for debugging with :func:`debug()
<sfepy.base.base.debug>`::
$ python examples/multi_physics/biot_parallel_interactive.py output-parallel
Parallel runs::
$ mpiexec -n 3 python examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --shape=101,101
$ mpiexec -n 3 python examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --shape=101,101 --metis
$ mpiexec -n 8 python examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --shape 101,101 --metis -snes_monitor -snes_view -snes_converged_reason -ksp_monitor
Using FieldSplit preconditioner::
$ mpiexec -n 2 python examples/multi_physics/biot_parallel_interactive.py output-parallel --shape=101,101 -snes_monitor -snes_converged_reason -ksp_monitor -pc_type fieldsplit
$ mpiexec -n 8 python examples/multi_physics/biot_parallel_interactive.py output-parallel --shape=1001,1001 --metis -snes_monitor -snes_converged_reason -ksp_monitor -pc_type fieldsplit -pc_fieldsplit_type additive
View the results using (strip linearization or approximation orders one)::
$ python postproc.py output-parallel/sol.h5 --wireframe -b -d'p,plot_warp_scalar:u,plot_displacements'
View the results using (adaptive linearization)::
$ python postproc.py output-parallel/sol_u.h5 --wireframe -b -d'u,plot_displacements'
$ python postproc.py output-parallel/sol_p.h5 --wireframe -b -d'p,plot_warp_scalar'
"""
from __future__ import absolute_import
from argparse import RawDescriptionHelpFormatter, ArgumentParser
import os
import time
import numpy as nm
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.discrete.common.region import Region
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem, State)
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.terms import Term
from sfepy.solvers.ls import PETScKrylovSolver
from sfepy.solvers.nls import PETScNonlinearSolver
from sfepy.mechanics.matcoefs import stiffness_from_lame
import sfepy.parallel.parallel as pl
from sfepy.parallel.evaluate import PETScParallelEvaluator
def create_local_problem(omega_gi, orders):
    """
    Local problem definition using a domain corresponding to the global region
    `omega_gi`.

    Parameters
    ----------
    omega_gi : Region
        The global cell region owned by this task.
    orders : sequence of two ints
        Approximation orders (order_u, order_p) for the displacement and
        pressure fields.

    Returns
    -------
    Problem
        The local Biot problem with boundary conditions and materials set up.
    """
    order_u, order_p = orders

    mesh = omega_gi.domain.mesh

    # All tasks have the whole mesh.
    bbox = mesh.get_bounding_box()
    min_x, max_x = bbox[:, 0]
    eps_x = 1e-8 * (max_x - min_x)

    min_y, max_y = bbox[:, 1]
    eps_y = 1e-8 * (max_y - min_y)

    # Local mesh/domain restricted to this task's cells.
    mesh_i = Mesh.from_region(omega_gi, mesh, localize=True)
    domain_i = FEDomain('domain_i', mesh_i)
    omega_i = domain_i.create_region('Omega', 'all')
    # Boundary facet regions; allow_empty because a task's subdomain may not
    # touch a given global boundary.
    gamma1_i = domain_i.create_region('Gamma1',
                                      'vertices in (x < %.10f)'
                                      % (min_x + eps_x),
                                      'facet', allow_empty=True)
    gamma2_i = domain_i.create_region('Gamma2',
                                      'vertices in (x > %.10f)'
                                      % (max_x - eps_x),
                                      'facet', allow_empty=True)
    gamma3_i = domain_i.create_region('Gamma3',
                                      'vertices in (y < %.10f)'
                                      % (min_y + eps_y),
                                      'facet', allow_empty=True)

    # Vector displacement field and scalar pressure field.
    field1_i = Field.from_args('fu', nm.float64, mesh.dim, omega_i,
                               approx_order=order_u)

    field2_i = Field.from_args('fp', nm.float64, 1, omega_i,
                               approx_order=order_p)

    output('field 1: number of local DOFs:', field1_i.n_nod)
    output('field 2: number of local DOFs:', field2_i.n_nod)

    u_i = FieldVariable('u_i', 'unknown', field1_i, order=0)
    v_i = FieldVariable('v_i', 'test', field1_i, primary_var_name='u_i')
    p_i = FieldVariable('p_i', 'unknown', field2_i, order=1)
    q_i = FieldVariable('q_i', 'test', field2_i, primary_var_name='p_i')

    # Biot coefficient (symmetric storage); artificially high so the pressure
    # influence on displacements is visible (see the module docstring).
    if mesh.dim == 2:
        alpha = 1e2 * nm.array([[0.132], [0.132], [0.092]])
    else:
        alpha = 1e2 * nm.array([[0.132], [0.132], [0.132],
                                [0.092], [0.092], [0.092]])

    mat = Material('m', D=stiffness_from_lame(mesh.dim, lam=10, mu=5),
                   k=1, alpha=alpha)
    # Quadrature order sufficient for the higher of the two field orders.
    integral = Integral('i', order=2*(max(order_u, order_p)))

    # Weak-form terms of the Biot system (see the module docstring).
    t11 = Term.new('dw_lin_elastic(m.D, v_i, u_i)',
                   integral, omega_i, m=mat, v_i=v_i, u_i=u_i)
    t12 = Term.new('dw_biot(m.alpha, v_i, p_i)',
                   integral, omega_i, m=mat, v_i=v_i, p_i=p_i)
    t21 = Term.new('dw_biot(m.alpha, u_i, q_i)',
                   integral, omega_i, m=mat, u_i=u_i, q_i=q_i)
    t22 = Term.new('dw_laplace(m.k, q_i, p_i)',
                   integral, omega_i, m=mat, q_i=q_i, p_i=p_i)
    eq1 = Equation('eq1', t11 - t12)
    # NOTE(review): the second equation reuses the name 'eq1' — possibly a
    # typo for 'eq2'; confirm whether equation names must be unique here.
    eq2 = Equation('eq1', t21 + t22)
    eqs = Equations([eq1, eq2])

    # Boundary conditions: clamp on Gamma1, prescribed x-displacement on
    # Gamma2, sine-shaped pressure profile on Gamma3.
    ebc1 = EssentialBC('ebc1', gamma1_i, {'u_i.all' : 0.0})
    ebc2 = EssentialBC('ebc2', gamma2_i, {'u_i.0' : 0.05})

    def bc_fun(ts, coors, **kwargs):
        # Pressure profile: full sine periods along the x extent.
        val = 0.3 * nm.sin(4 * nm.pi * (coors[:, 0] - min_x) / (max_x - min_x))
        return val

    fun = Function('bc_fun', bc_fun)
    ebc3 = EssentialBC('ebc3', gamma3_i, {'p_i.all' : fun})

    pb = Problem('problem_i', equations=eqs, active_only=False)
    # Apply the boundary conditions, then evaluate material parameters.
    pb.time_update(ebcs=Conditions([ebc1, ebc2, ebc3]))
    pb.update_materials()

    return pb
def solve_problem(mesh_filename, options, comm):
    """Assemble and solve the coupled displacement/pressure problem in parallel.

    Loads the global mesh, partitions it over the MPI tasks, builds a local
    per-task problem, solves the distributed nonlinear system with PETSc and
    writes both the per-task and the recombined global solutions.

    Parameters
    ----------
    mesh_filename : str
        Path of the global mesh file (written by ``main()``).
    options : argparse.Namespace
        Parsed command line options (``order_u``, ``order_p``, ``output_dir``,
        ``metis``, ``save_inter_regions``, ``linearization``).
    comm : mpi4py-style communicator
        The PETSc world communicator shared by all tasks.
    """
    order_u = options.order_u
    order_p = options.order_p
    rank, size = comm.Get_rank(), comm.Get_size()
    output('rank', rank, 'of', size)
    mesh = Mesh.from_file(mesh_filename)
    # Only the root task computes the cell -> task partitioning; the other
    # tasks receive their piece via distribute_fields_dofs() below.
    if rank == 0:
        cell_tasks = pl.partition_mesh(mesh, size, use_metis=options.metis,
                                       verbose=True)
    else:
        cell_tasks = None
    output('creating global domain and fields...')
    tt = time.clock()
    domain = FEDomain('domain', mesh)
    omega = domain.create_region('Omega', 'all')
    # Global (serial) fields: vector displacement 'fu' and scalar pressure 'fp'.
    field1 = Field.from_args('fu', nm.float64, mesh.dim, omega,
                             approx_order=order_u)
    field2 = Field.from_args('fp', nm.float64, 1, omega,
                             approx_order=order_p)
    fields = [field1, field2]
    output('...done in', time.clock() - tt)
    output('distributing fields...')
    tt = time.clock()
    # lfds: local (per-task) field distributions, gfds: global distributions
    # holding the DOF id maps used when recombining the solution at the end.
    distribute = pl.distribute_fields_dofs
    lfds, gfds = distribute(fields, cell_tasks,
                            is_overlap=True,
                            use_expand_dofs=True,
                            save_inter_regions=options.save_inter_regions,
                            output_dir=options.output_dir,
                            comm=comm, verbose=True)
    output('...done in', time.clock() - tt)
    output('creating local problem...')
    tt = time.clock()
    cells = lfds[0].cells
    omega_gi = Region.from_cells(cells, domain)
    omega_gi.finalize()
    omega_gi.update_shape()
    # create_local_problem() is defined earlier in this file; it builds the
    # Biot-type equations restricted to this task's cells.
    pb = create_local_problem(omega_gi, [order_u, order_p])
    variables = pb.get_variables()
    state = State(variables)
    state.fill(0.0)
    state.apply_ebc()
    output('...done in', time.clock() - tt)
    output('allocating global system...')
    tt = time.clock()
    sizes, drange, pdofs = pl.setup_composite_dofs(lfds, fields, variables,
                                                   verbose=True)
    pmtx, psol, prhs = pl.create_petsc_system(pb.mtx_a, sizes, pdofs, drange,
                                              is_overlap=True, comm=comm,
                                              verbose=True)
    output('...done in', time.clock() - tt)
    output('creating solver...')
    tt = time.clock()
    # Linear solver: BiCGStab(l) with Jacobi preconditioning, split by field.
    conf = Struct(method='bcgsl', precond='jacobi', sub_precond='none',
                  i_max=10000, eps_a=1e-50, eps_r=1e-6, eps_d=1e4,
                  verbose=True)
    status = {}
    ls = PETScKrylovSolver(conf, comm=comm, mtx=pmtx, status=status)
    # Tell PETSc which DOF ranges belong to which field (u vs. p) so the
    # field-split preconditioner can be used.
    field_ranges = {}
    for ii, variable in enumerate(variables.iter_state(ordered=True)):
        field_ranges[variable.name] = lfds[ii].petsc_dofs_range
    ls.set_field_split(field_ranges, comm=comm)
    ev = PETScParallelEvaluator(pb, pdofs, drange, True,
                                psol, comm, verbose=True)
    nls_status = {}
    # Nonlinear solver: PETSc's Newton line search.
    conf = Struct(method='newtonls',
                  i_max=5, eps_a=0, eps_r=1e-5, eps_s=0.0,
                  verbose=True)
    nls = PETScNonlinearSolver(conf, pmtx=pmtx, prhs=prhs, comm=comm,
                               fun=ev.eval_residual,
                               fun_grad=ev.eval_tangent_matrix,
                               lin_solver=ls, status=nls_status)
    output('...done in', time.clock() - tt)
    output('solving...')
    tt = time.clock()
    # Seed the PETSc vector with the EBC-consistent initial state, solve, and
    # scatter the converged solution back to this task's local vector.
    state = pb.create_state()
    state.apply_ebc()
    ev.psol_i[...] = state()
    ev.gather(psol, ev.psol_i)
    psol = nls(psol)
    ev.scatter(ev.psol_i, psol)
    sol0_i = ev.psol_i[...]
    output('...done in', time.clock() - tt)
    output('saving solution...')
    tt = time.clock()
    # Each task saves its own piece of the solution...
    state.set_full(sol0_i)
    out = state.create_output_dict()
    filename = os.path.join(options.output_dir, 'sol_%02d.h5' % comm.rank)
    pb.domain.mesh.write(filename, io='auto', out=out)
    # ...and the root task additionally recombines and saves the global one.
    gather_to_zero = pl.create_gather_to_zero(psol)
    psol_full = gather_to_zero(psol)
    if comm.rank == 0:
        sol = psol_full[...].copy()
        u = FieldVariable('u', 'parameter', field1,
                          primary_var_name='(set-to-None)')
        # id_map reorders the gathered PETSc DOFs back to field ordering.
        remap = gfds[0].id_map
        ug = sol[remap]
        p = FieldVariable('p', 'parameter', field2,
                          primary_var_name='(set-to-None)')
        remap = gfds[1].id_map
        pg = sol[remap]
        # Linear fields (or explicit 'strip' linearization) can be written
        # directly; higher orders need adaptive linearization per field.
        if (((order_u == 1) and (order_p == 1))
            or (options.linearization == 'strip')):
            out = u.create_output(ug)
            out.update(p.create_output(pg))
            filename = os.path.join(options.output_dir, 'sol.h5')
            mesh.write(filename, io='auto', out=out)
        else:
            out = u.create_output(ug, linearization=Struct(kind='adaptive',
                                                           min_level=0,
                                                           max_level=order_u,
                                                           eps=1e-3))
            filename = os.path.join(options.output_dir, 'sol_u.h5')
            out['u'].mesh.write(filename, io='auto', out=out)
            out = p.create_output(pg, linearization=Struct(kind='adaptive',
                                                           min_level=0,
                                                           max_level=order_p,
                                                           eps=1e-3))
            filename = os.path.join(options.output_dir, 'sol_p.h5')
            out['p'].mesh.write(filename, io='auto', out=out)
    output('...done in', time.clock() - tt)
# Help strings for the command line options defined in main() below; the
# keys match the option names passed to parser.add_argument().
helps = {
    'output_dir' :
    'output directory',
    'dims' :
    'dimensions of the block [default: %(default)s]',
    'shape' :
    'shape (counts of nodes in x, y, z) of the block [default: %(default)s]',
    'centre' :
    'centre of the block [default: %(default)s]',
    '2d' :
    'generate a 2D rectangle, the third components of the above'
    ' options are ignored',
    'u-order' :
    'displacement field approximation order',
    'p-order' :
    'pressure field approximation order',
    'linearization' :
    'linearization used for storing the results with approximation order > 1'
    ' [default: %(default)s]',
    'metis' :
    'use metis for domain partitioning',
    'save_inter_regions' :
    'save inter-task regions for debugging partitioning problems',
    'silent' : 'do not print messages to screen',
    'clear' :
    'clear old solution files from output directory'
    ' (DANGEROUS - use with care!)',
}
def main():
    """Parse the command line, generate the block mesh on the root task and
    run the parallel solution via :func:`solve_problem`.
    """
    parser = ArgumentParser(description=__doc__.rstrip(),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('output_dir', help=helps['output_dir'])
    parser.add_argument('--dims', metavar='dims',
                        action='store', dest='dims',
                        default='1.0,1.0,1.0', help=helps['dims'])
    parser.add_argument('--shape', metavar='shape',
                        action='store', dest='shape',
                        default='11,11,11', help=helps['shape'])
    parser.add_argument('--centre', metavar='centre',
                        action='store', dest='centre',
                        default='0.0,0.0,0.0', help=helps['centre'])
    parser.add_argument('-2', '--2d',
                        action='store_true', dest='is_2d',
                        default=False, help=helps['2d'])
    parser.add_argument('--u-order', metavar='int', type=int,
                        action='store', dest='order_u',
                        default=1, help=helps['u-order'])
    parser.add_argument('--p-order', metavar='int', type=int,
                        action='store', dest='order_p',
                        default=1, help=helps['p-order'])
    parser.add_argument('--linearization', choices=['strip', 'adaptive'],
                        action='store', dest='linearization',
                        default='strip', help=helps['linearization'])
    parser.add_argument('--metis',
                        action='store_true', dest='metis',
                        default=False, help=helps['metis'])
    parser.add_argument('--save-inter-regions',
                        action='store_true', dest='save_inter_regions',
                        default=False, help=helps['save_inter_regions'])
    parser.add_argument('--silent',
                        action='store_true', dest='silent',
                        default=False, help=helps['silent'])
    parser.add_argument('--clear',
                        action='store_true', dest='clear',
                        default=False, help=helps['clear'])
    options, petsc_opts = parser.parse_known_args()
    comm = pl.PETSc.COMM_WORLD
    output_dir = options.output_dir
    # Every task logs into its own file; only the root creates the directory.
    filename = os.path.join(output_dir, 'output_log_%02d.txt' % comm.rank)
    if comm.rank == 0:
        ensure_path(filename)
    comm.barrier()
    output.prefix = 'sfepy_%02d:' % comm.rank
    output.set_output(filename=filename, combined=not options.silent)
    output('petsc options:', petsc_opts)
    mesh_filename = os.path.join(options.output_dir, 'para.h5')
    # Only the root task generates (and optionally cleans up) the mesh; the
    # others wait at the barrier and then read it from disk.
    if comm.rank == 0:
        from sfepy.mesh.mesh_generators import gen_block_mesh
        if options.clear:
            remove_files_patterns(output_dir,
                                  ['*.h5', '*.mesh', '*.txt'],
                                  ignores=['output_log_%02d.txt' % ii
                                           for ii in range(comm.size)],
                                  verbose=True)
        save_options(os.path.join(output_dir, 'options.txt'),
                     [('options', vars(options))])
        dim = 2 if options.is_2d else 3
        # Parse the comma-separated option strings explicitly instead of
        # eval()-ing user input (safer; plain numbers only, which matches
        # the documented defaults).
        dims = nm.array([float(ii) for ii in options.dims.split(',')],
                        dtype=nm.float64)[:dim]
        shape = nm.array([int(ii) for ii in options.shape.split(',')],
                         dtype=nm.int32)[:dim]
        centre = nm.array([float(ii) for ii in options.centre.split(',')],
                          dtype=nm.float64)[:dim]
        output('dimensions:', dims)
        output('shape: ', shape)
        output('centre: ', centre)
        mesh = gen_block_mesh(dims, shape, centre, name='block-fem',
                              verbose=True)
        mesh.write(mesh_filename, io='auto')
    comm.barrier()
    output('field u order:', options.order_u)
    output('field p order:', options.order_p)
    solve_problem(mesh_filename, options, comm)
if __name__ == '__main__':
    main()
| 36.639918 | 216 | 0.588083 |
ace9c193df2914bc8dcc854a73b02ccbf10cecf1 | 599 | py | Python | packages/aws-cdk/lib/init-templates/v1/sample-app/python/%name.PythonModule%/%name.PythonModule%_stack.template.py | RichiCoder1/aws-cdk | 626e6aa1a27feffe7ce60a46a6fdcf26f317eaef | [
"Apache-2.0"
] | 6,159 | 2019-07-11T16:53:02.000Z | 2022-03-31T20:52:53.000Z | packages/aws-cdk/lib/init-templates/v1/sample-app/python/%name.PythonModule%/%name.PythonModule%_stack.template.py | RichiCoder1/aws-cdk | 626e6aa1a27feffe7ce60a46a6fdcf26f317eaef | [
"Apache-2.0"
] | 16,881 | 2019-07-11T18:58:07.000Z | 2022-03-31T23:59:47.000Z | packages/aws-cdk/lib/init-templates/v1/sample-app/python/%name.PythonModule%/%name.PythonModule%_stack.template.py | RichiCoder1/aws-cdk | 626e6aa1a27feffe7ce60a46a6fdcf26f317eaef | [
"Apache-2.0"
] | 2,504 | 2019-07-11T17:52:52.000Z | 2022-03-31T21:19:53.000Z | from aws_cdk import (
aws_iam as iam,
aws_sqs as sqs,
aws_sns as sns,
aws_sns_subscriptions as subs,
core
)
# NOTE: this is an AWS CDK *init template*; the %name.PascalCased% tokens are
# placeholders substituted by `cdk init`, so the file is not valid Python as-is.
class %name.PascalCased%Stack(core.Stack):
    """Sample CDK stack: an SQS queue subscribed to an SNS topic."""

    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # Queue with a 5 minute visibility timeout.
        queue = sqs.Queue(
            self, "%name.PascalCased%Queue",
            visibility_timeout=core.Duration.seconds(300),
        )
        topic = sns.Topic(
            self, "%name.PascalCased%Topic"
        )
        # Deliver every message published to the topic into the queue.
        topic.add_subscription(subs.SqsSubscription(queue))
ace9c22ba65e00a8a85f6a7856c1c2d8444edd10 | 12,583 | py | Python | lib/fathead/airports/parse.py | aeisenberg/zeroclickinfo-fathead | 9be00a038d812ca9ccd0d601220afde777ab2f8e | [
"Apache-2.0"
] | 1 | 2021-01-05T16:48:23.000Z | 2021-01-05T16:48:23.000Z | lib/fathead/airports/parse.py | aeisenberg/zeroclickinfo-fathead | 9be00a038d812ca9ccd0d601220afde777ab2f8e | [
"Apache-2.0"
] | null | null | null | lib/fathead/airports/parse.py | aeisenberg/zeroclickinfo-fathead | 9be00a038d812ca9ccd0d601220afde777ab2f8e | [
"Apache-2.0"
] | 1 | 2016-06-12T06:12:02.000Z | 2016-06-12T06:12:02.000Z | #! env python
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import logging
import os
import re
# TODO look for a better way to enforce it
# NOTE(review): reload()/sys.setdefaultencoding() exist only in Python 2;
# this module is Python 2 code and will not run unchanged under Python 3.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# The "APH" airport code corresponds to the Jacksonville International
# Airport in Jacksonville, FL, United Sates.
# Limit the entries to the code (e.g. 'JAX' ) and the airport name +
# 'code' (e.g. 'Jacksonville International Airport code'). The latter
# being a redirect to the former. We could also include one with the
# word 'airport' removed (e.g. 'Jacksonville International code').
# Having the result for the city name would cover too many searches
# that aren't looking for the airport code.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
# Tab-separated fathead output file written by the __main__ block.
OUTPUT_FILE = 'output.txt'
# One downloaded Wikipedia list page per letter, stored under download/<letter>.
INDEXES = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
WIKIPEDIA_URL = 'https://en.wikipedia.org'
WIKIPEDIA_LIST_URL = WIKIPEDIA_URL + '/wiki/List_of_airports_by_IATA_code:_'
def append_period(text):
    """Append a period at the end of the sentence.

    If the sentence ends with a double quote, the period is placed just
    before the closing quote (e.g. ``... is "JAX"`` -> ``... is "JAX."``).
    Previously a sentence that did not end with a quote was returned
    unchanged, contradicting this function's documented purpose; it now
    gets a trailing period (unless it already has one). Empty input is
    returned as-is instead of raising IndexError.
    """
    if not text:
        return text
    if text[-1] == '\"':
        # Put the period inside the closing quote.
        return text[0:-1]+'.\"'
    if text[-1] == '.':
        return text
    return text + '.'
def getFields(name, linetype, abstract=''):
    """Build one fathead output record as a list of 13 fields.

    Field layout (joined with tabs by the callers): 0 unique name,
    1 line type, 2 redirect, 9 disambiguation content, 11 abstract,
    12 source link; every other field is left empty.
    """
    record = [''] * 13
    record[0] = name
    record[1] = linetype
    record[11] = abstract
    return record
class Airport(object):
    """Information about one airport and helpers that render its fathead
    output lines (IATA/ICAO code articles, name/location articles and
    redirects).
    """

    # Abstract templates; positional args are (0) IATA, (1) ICAO,
    # (2) airport name, (3) location -- see _format().
    iata_abstract_format = 'The "{0}" IATA airport code corresponds to {2} ' \
                           'in {3}'
    icao_abstract_format = 'The "{1}" ICAO airport code corresponds to {2} ' \
                           'in {3} and the IATA code is "{0}"'
    name_abstract_format = 'The IATA code for the {2} is "{0}"'
    location_abstract_format = 'The IATA code for the {2} near {3} is "{0}"'
    abstract_icao_format = ' and the ICAO code is "{1}"'

    def __init__(self, name, iata, icao, location, url):
        self.name = name
        self.iata = iata
        self.icao = icao
        self.location = location
        self._url = url
        self.international_airport_name = None
        self.name_with_airport = None
        self.abstract_icao_part = ''
        # Pre-render the optional "... and the ICAO code is ..." suffix.
        if self.icao != '':
            self.abstract_icao_part = self._format(
                Airport.abstract_icao_format)
        # Put name with airport and international airport
        self.name_with_airport = self.name
        if self.name_with_airport.find('Airport') < 0:
            self.name_with_airport += ' Airport'
        index = self.name_with_airport.rfind(' International Airport')
        if index > 0:
            self.international_airport_name = self.name_with_airport
        # '<City> Airport' variant derived from the first location component,
        # unless it duplicates the airport name itself.
        self.airport_location_name = None
        if self.location is not None:
            location_names = self.location.split(',')
            if len(location_names) > 0:
                self.airport_location_name = location_names[0]+' Airport'
            if self.airport_location_name == self.name_with_airport:
                self.airport_location_name = None
        # remove redundancy in airports/location names
        if self.name_with_airport is not None \
                and self.name_with_airport.find('airports in ') != -1:
            self.name_with_airport = 'airports'

    def _format(self, string):
        # Fill a template with this airport's (iata, icao, name, location).
        return string.format(self.iata, self.icao, self.name_with_airport,
                             self.location)

    def add_iata(self, output):
        """Append the article line keyed by the IATA code to *output*."""
        abstract = self._format(Airport.iata_abstract_format) + \
            self.abstract_icao_part
        if self.iata is not None and len(self.iata) != 0:
            fields = self._getFields(self.iata, 'A', append_period(abstract))
            output.append('%s' % ('\t'.join(fields)))

    def add_icao(self, output):
        """Append the article line keyed by the ICAO code to *output*."""
        abstract = self._format(Airport.icao_abstract_format)
        if self.icao is not None and len(self.icao) != 0:
            fields = self._getFields(self.icao, 'A', append_period(abstract))
            output.append('%s' % ('\t'.join(fields)))

    def add_name(self, output):
        """Append the article line keyed by the airport name to *output*."""
        abstract = self._format(Airport.name_abstract_format) + \
            self.abstract_icao_part
        # NOTE(review): len(...) != "" compares an int with a str and is
        # therefore always True -- probably meant len(...) != 0.
        if self.name_with_airport is not None \
                and len(self.name_with_airport) != "":
            fields = self._getFields(self.name_with_airport, 'A',
                                     append_period(abstract))
            output.append('%s' % ('\t'.join(fields)))

    def add_location(self, output):
        """Append the article line keyed by '<City> Airport' to *output*."""
        abstract = self._format(Airport.location_abstract_format) + \
            self.abstract_icao_part
        if self.airport_location_name is not None:
            fields = self._getFields(self.airport_location_name, 'A',
                                     append_period(abstract))
            output.append('%s' % ('\t'.join(fields)))

    def add_redirects(self, output, withRedirect):
        """Append redirect ('R') lines for international airport names.

        The '<Name> International' -> '<Name> International Airport' redirect
        is always emitted; when *withRedirect* is true the full name is also
        redirected straight to the IATA code article.
        """
        if self.international_airport_name is None:
            return
        name_split = self.international_airport_name.split('Airport', 2)
        if len(name_split) > 1:
            fields = self._getFields(name_split[0].strip(), 'R')
            fields[2] = self.international_airport_name
            fields[12] = ''
            output.append('%s' % ('\t'.join(fields)))
        if withRedirect:
            fields = self._getFields(self.name_with_airport, 'R')
            fields[2] = self.iata
            fields[12] = ''
            output.append('%s' % ('\t'.join(fields)))

    def _getFields(self, name, linetype, abstract=''):
        # Like the module-level getFields(), plus this airport's source URL.
        fields = getFields(name, linetype, abstract)
        fields[12] = self._url
        return fields

    def __str__(self):
        return self.name_with_airport + ';' + self.iata + ';' + self.icao + \
            ';' + self.location + ';' + self._url
def html_element_to_text(html_element):
    """Return the element's text with internal line breaks collapsed.

    Any run of line breaks (plus surrounding whitespace) inside the text is
    replaced by a single space (e.g.: Cloudbreak / Western Australia
    Airport), and the result is stripped of leading/trailing whitespace.
    """
    raw = html_element.getText()
    collapsed = re.sub('\s*[\n\r]+\s*', ' ', raw)
    return collapsed.strip()
class Parser(object):
    """ Parses a downloaded Wikipedia airport-list HTML page (one per index
    letter, stored under download/<letter>) to extract all airport codes. """

    def __init__(self, index_letter):
        # The page must have been fetched beforehand into download/<letter>.
        self.soup = BeautifulSoup(open('download/'+index_letter), "html5lib",
                                  from_encoding='utf-8')
        self.index_letter = index_letter

    def get_airports(self):
        """Populate self.airports with Airport objects parsed from the page.

        Raises Exception when the page's table layout does not match the
        expected IATA / ICAO / Airport / Location column order.
        """
        self.airports = []
        # First table in the page holds the main airport information
        table = self.soup.find_all('table')[0]
        line_number = 0
        rows = table.find_all('tr')
        if len(rows) < 1:
            raise Exception('Table for index_letter %s is too small'
                            % (self.index_letter))
        # Guard against format/column changes in the table
        header_row = rows[0].find_all('th')
        if len(rows) < 4:
            raise Exception('Table for index_letter %s is too few columns'
                            % (self.index_letter))
        if 'IATA' not in header_row[0].getText().strip():
            raise Exception('Could not find IATA column in table for '
                            'index_letter %s' % (self.index_letter))
        if 'ICAO' not in header_row[1].getText().strip():
            raise Exception('Could not find ICAO column in table for '
                            'index_letter %s' % (self.index_letter))
        if 'Airport' not in header_row[2].getText().strip():
            raise Exception('Could not find "Airport" column in table '
                            'for index_letter %s' % (self.index_letter))
        if 'Location' not in header_row[3].getText().strip():
            raise Exception('Could not find "Location" column in table '
                            'for index_letter %s' % (self.index_letter))
        # The table format matches the expected format
        for row in rows[1::]:
            line_number += 1
            data = row.find_all('td')
            if len(data) < 4: # partial table heading
                continue
            # Prefer the airport's link text; fall back to the raw cell, then
            # to the Location column when the Airport cell is empty.
            airport_link = data[2].find('a')
            if airport_link is not None:
                airport_element = airport_link
            else:
                airport_element = data[2]
            airport_name = html_element_to_text(airport_element)
            if not airport_name:
                airport_element = data[3].find('a')
                if airport_element is None:
                    airport_element = data[3]
                airport_name = html_element_to_text(airport_element)
            url = None
            # Try to extract the airport's url from the link in the table. But
            # only if it's not a redlink (i.e.: The wiki page exists)
            if airport_link is not None:
                href = airport_link['href']
                if href and "redlink=1" not in href:
                    url = href
                    if url.startswith("/"):
                        url = WIKIPEDIA_URL + url
            # No existing Wikipage found. Hence linking to the table
            if not url:
                url = WIKIPEDIA_LIST_URL + self.index_letter
            # logger.debug(data)
            self.airports.append(
                Airport(
                    airport_name,
                    html_element_to_text(data[0]), # IATA
                    html_element_to_text(data[1]), # ICAO
                    html_element_to_text(data[3]),
                    url))
def addDisambituation(value, airport, disambiguations):
    """Register *airport* under the key *value* in *disambiguations*.

    For an existing (non-None) key the airport is appended only when no
    airport with the same IATA code is already listed there; any other key
    (including None) starts a fresh single-element list.
    """
    existing = disambiguations.get(value) if value is not None else None
    if existing is None:
        disambiguations[value] = [airport]
    elif all(entry.iata != airport.iata for entry in existing):
        existing.append(airport)
def findAndMarkDisambiguations(airports):
    """Map every name and ICAO code to the list of airports sharing it.

    Keys whose list has more than one entry are ambiguous and will be
    rendered as disambiguation pages; unique keys get regular articles.
    """
    mapping = {}
    for airport in airports:
        for key in (airport.name_with_airport,
                    airport.airport_location_name,
                    airport.international_airport_name):
            addDisambituation(key, airport, mapping)
    # ICAO codes are collected without the duplicate-IATA guard used above.
    for airport in airports:
        icao = airport.icao
        if icao is not None and len(icao) > 0 and icao in mapping:
            mapping[icao].append(airport)
        else:
            mapping[icao] = [airport]
    return mapping
def print_disambiguation(item):
    """Render one disambiguation ('D') output chunk for *item*.

    *item* is a single ``(key, airports)`` tuple -- kept as one argument so
    the function can still be mapped directly over ``dict.items()``.  The
    original Python 2-only tuple-parameter signature ``((key, airports))``
    is a syntax error under Python 3 (removed by PEP 3113), so the tuple is
    now unpacked explicitly.

    For keys ending in 'Airport' an extra plural redirect line
    ('<key>s' -> '<key>') is appended.
    """
    key, airports = item
    fields = getFields(key, 'D')
    for airport in airports:
        string = '*'
        string += '[['+airport.iata+']] '
        fields[9] += string+airport.name+' in '+airport.location+'\\n'
    ret = '%s' % ('\t'.join(fields))+'\n'
    if re.match('.*Airport', key):
        fields = getFields(key, 'R')
        fields[2] = fields[0]
        fields[12] = ''
        fields[0] = fields[0]+'s'
        ret = ret + '%s' % ('\t'.join(fields))+'\n'
    return ret
if __name__ == '__main__':
    with open(OUTPUT_FILE, 'w') as output:
        airports = []
        # Parse every downloaded index page (A-Z) and collect all airports.
        for i in INDEXES:
            parser = Parser(i)
            logger.debug("Index: "+i)
            parser.get_airports()
            airports += parser.airports
        disambiguations = findAndMarkDisambiguations(airports)
        # Write the regular article ('A') and redirect ('R') lines, skipping
        # any key that is ambiguous (shared by more than one airport).
        for airport in airports:
            strings = []
            airport.add_iata(strings)
            if len(disambiguations[airport.icao]) == 1:
                airport.add_icao(strings)
            ian = airport.international_airport_name
            if ian is not None and len(disambiguations[ian]) == 1:
                airport.add_redirects(
                    strings,
                    airport.name_with_airport not in disambiguations
                )
            if len(disambiguations[airport.name_with_airport]) == 1:
                airport.add_name(strings)
            if len(disambiguations[airport.airport_location_name]) == 1:
                airport.add_location(strings)
            output.write('\n'.join(strings)+'\n')
        # Write the disambiguation ('D') lines.  An explicit loop replaces
        # the original map()/filter() chain: the tuple-unpacking lambda was
        # Python 2-only syntax, and map(output.write, ...) is lazy under
        # Python 3, so nothing would ever have been written.
        for item in disambiguations.items():
            if len(item[1]) > 1:
                output.write(print_disambiguation(item))
ace9c28afed1c8c6d5dd5c142451a0c4b6f4bdf4 | 755 | py | Python | nncrypt/validation.py | seungwonpark/LearningToProtect | b0b4367acd8f998b2eff18ed34eeb7bd3e9a14ed | [
"Apache-2.0"
] | 5 | 2020-03-29T17:38:08.000Z | 2022-01-21T14:14:56.000Z | nncrypt/validation.py | seungwonpark/LearningToProtect | b0b4367acd8f998b2eff18ed34eeb7bd3e9a14ed | [
"Apache-2.0"
] | null | null | null | nncrypt/validation.py | seungwonpark/LearningToProtect | b0b4367acd8f998b2eff18ed34eeb7bd3e9a14ed | [
"Apache-2.0"
] | null | null | null | import torch
def validate(hp, args, alice, bob, eve, valloader, writer, step):
    """Run one validation pass and log Bob's and Eve's recovery accuracy.

    Alice encrypts each plaintext with the key; Bob decrypts with the key,
    Eve attacks without it.  Accuracy is the fraction of plaintext elements
    each of them recovers, averaged over the validation set.

    Parameters: hp - hyperparameter object (hp.data.plain is used as the
    per-sample normalizer; presumably the plaintext length -- confirm),
    args - unused here, alice/bob/eve - the three networks, valloader -
    validation DataLoader, writer - metrics logger, step - global step.
    """
    # Switch all three networks to inference mode for the validation pass.
    alice.eval(); bob.eval(); eve.eval()
    correct_e = 0
    correct_b = 0
    for plain, key in valloader:
        plain = plain.cuda()
        key = key.cuda()
        cipher = alice(plain, key)
        outE = eve(cipher)
        outB = bob(cipher, key)
        # An element counts as recovered when it is within 1 of the
        # plaintext value; normalize to a per-sample fraction.
        correct_e += torch.sum(torch.abs(plain-outE)<1).item() / hp.data.plain
        correct_b += torch.sum(torch.abs(plain-outB)<1).item() / hp.data.plain
    acc_e = correct_e / len(valloader.dataset)
    acc_b = correct_b / len(valloader.dataset)
    writer.log_accuracy(acc_b, acc_e, step)
    print('Accuracy(%%): Bob %.1f Eve %.1f' % (100.*acc_b, 100.*acc_e))
    # Restore training mode before returning to the training loop.
    alice.train(); bob.train(); eve.train()
ace9c2d6422d992d264d9db11d2b26e9249ab0c2 | 6,503 | py | Python | tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allreduce_4p.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | 2 | 2020-04-28T03:49:10.000Z | 2020-04-28T03:49:13.000Z | tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allreduce_4p.py | liyong126/mindspore | 930a1fb0a8fa9432025442c4f4732058bb7af592 | [
"Apache-2.0"
] | 7 | 2020-03-30T08:31:56.000Z | 2020-04-01T09:54:39.000Z | tests/ut/python/parallel/parallel_end_to_end/hcom/_test_allreduce_4p.py | liyong126/mindspore | 930a1fb0a8fa9432025442c4f4732058bb7af592 | [
"Apache-2.0"
] | 1 | 2020-03-30T17:07:43.000Z | 2020-03-30T17:07:43.000Z | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import mindspore as ms
from mindspore.nn import Cell
from mindspore import context
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
import mindspore.communication.management as distributedTool
from mindspore.ops.composite import grad_all_with_sens
# Number of devices taking part in this parallel test run.
device_num=4
# This process's rank, injected by the distributed launcher via RANK_ID.
device_id = int(os.environ["RANK_ID"])
path = "./output/"
def setup_module():
    """Pytest module setup: configure graph mode, auto-parallel context and
    initialize the distributed (HCCL) communication backend."""
    print("~~~~~~~~~~~set up~~~~~~~~~~~~~")
    context.set_context(mode=context.GRAPH_MODE)
    context.set_auto_parallel_context(device_num=device_num, global_rank=device_id)
    distributedTool.init()
    print("~~~~~~~~~~~set up finished~~~~~~~~~~~~~")
def teardown_module():
    """Pytest module teardown: nothing to release, just log."""
    print("~~~~~~~~~~~~tear down~~~~~~~~~~")
class MatmulSingle(Cell):
    """Serial reference network: sum((x @ y @ z) ** 2), no communication."""

    def __init__(self, transpose_a=False, transpose_b=False):
        super(MatmulSingle, self).__init__()
        self.matmul1 = P.MatMul(transpose_a, transpose_b)
        self.matmul2 = P.MatMul(transpose_a, transpose_b)
        self.pow = P.Pow()
        self.reduce_sum = P.ReduceSum()

    def construct(self, x, y, z):
        out = self.matmul1(x, y)
        out = self.matmul2(out, z)
        out = self.pow(out,2.0)
        # Reduce over all axes to a scalar.
        out = self.reduce_sum(out, None)
        return out
class MatmulReduce(Cell):
    """Distributed variant of MatmulSingle: each device holds a shard and
    AllReduce sums the partial matmul result and the final scalar across
    the communication *group*."""

    def __init__(self, group, transpose_a=False, transpose_b=False):
        super(MatmulReduce, self).__init__()
        self.matmul1 = P.MatMul(transpose_a, transpose_b)
        # Combine the partial x @ y products from all devices.
        self.allreduce1 = P.AllReduce(group=group)
        self.matmul2 = P.MatMul(transpose_a, transpose_b)
        self.pow = P.Pow()
        self.reduce_sum = P.ReduceSum()
        # Combine the per-device scalar sums.
        self.allreduce2 = P.AllReduce(group=group)

    def construct(self, x, y, z):
        out = self.matmul1(x, y)
        out = self.allreduce1(out)
        out = self.matmul2(out, z)
        out = self.pow(out,2.0)
        out = self.reduce_sum(out, None)
        out = self.allreduce2(out)
        return out
class Grad(Cell):
    """Wrapper that returns the gradients of *network* w.r.t. all its
    inputs, scaled by the sensitivity *sens*."""

    def __init__(self, network):
        super(Grad, self).__init__()
        self.network = network

    def construct(self, x, y, z, sens):
        return grad_all_with_sens(self.network)(x, y, z, sens)
class MatmulReduceFactory:
    """Compares the gradients of the serial network (MatmulSingle) against
    the distributed one (MatmulReduce) for given input shapes and sharding
    strategies (x_stra/y_stra/z_stra are per-axis split counts)."""

    def __init__(self, inputx_shape, inputy_shape, inputz_shape, x_stra, y_stra, z_stra):
        # Deterministic inputs so every device generates the same data.
        self.inputx=self.GenValue(inputx_shape, 10)
        self.inputy=self.GenValue(inputy_shape, 20)
        self.inputz=self.GenValue(inputz_shape, 30)
        self.x_stra = x_stra
        self.y_stra = y_stra
        self.z_stra = z_stra
        # Total number of model-parallel shards = product of the strategy.
        stra_size= 1
        for s in x_stra:
            stra_size = stra_size*s
        self.stra_size = stra_size

    def GenValue(self, input_shape, delta):
        """Return a deterministic float32 array of the given shape; values
        cycle in [0, min(100, size)) shifted down by *delta*."""
        size = 1
        for s in input_shape:
            size = size*s
        number_range = min(100, size)
        input_np = np.reshape(np.arange(0, size)%number_range - delta, input_shape).astype(np.float32)
        return input_np

    def get_parallel_blocks(self, input_, strategy):
        """Split *input_* into the row-major list of blocks produced by
        splitting axis i into strategy[i] parts."""
        blocks = [input_]
        i = 0
        for stra in strategy:
            temp = []
            while len(blocks)>0:
                block = blocks.pop(0)
                temp.extend(np.split(block, stra, axis=i))
            blocks.extend(temp)
            i+=1
        return blocks

    def grad_mindspore_impl_single(self):
        """Gradients of the serial reference network on the full inputs."""
        x=Tensor(self.inputx)
        y=Tensor(self.inputy)
        z=Tensor(self.inputz)
        sens=Tensor(1.0, dtype=ms.float32)
        net = MatmulSingle()
        grad_net = Grad(net)
        grad_net.set_train()
        input_grad = grad_net(x, y, z, sens)
        return input_grad

    def grad_mindspore_impl_reduce(self):
        """Gradients of the distributed network on this device's shard."""
        inputxs = self.get_parallel_blocks(self.inputx, self.x_stra)
        inputys = self.get_parallel_blocks(self.inputy, self.y_stra)
        inputzs = self.get_parallel_blocks(self.inputz, self.z_stra)
        x = Tensor(inputxs[device_id%self.stra_size])
        y = Tensor(inputys[device_id%self.stra_size])
        z = Tensor(inputzs[device_id%self.stra_size])
        # Scale sens to compensate for the data repetition across the
        # repeated device groups (repeat_num groups of stra_size shards).
        repeat_num = device_num/self.stra_size
        v = self.stra_size*repeat_num*repeat_num*repeat_num
        sens = Tensor(1.0/v, dtype=ms.float32)
        net = MatmulReduce("hccl_world_group")
        grad_net = Grad(net)
        grad_net.set_train()
        input_grad = grad_net(x, y, z, sens)
        return input_grad

    def grad_cmp(self):
        """Assert the distributed gradients match this device's shard of the
        serial gradients within 1e-4 tolerance."""
        single_results = self.grad_mindspore_impl_single()
        reduce_results = self.grad_mindspore_impl_reduce()
        single_result0 = self.get_parallel_blocks(single_results[0].asnumpy(), self.x_stra)[device_id%self.stra_size]
        reduce_result0 = reduce_results[0].asnumpy()
        single_result1 = self.get_parallel_blocks(single_results[1].asnumpy(), self.y_stra)[device_id%self.stra_size]
        reduce_result1 = reduce_results[1].asnumpy()
        single_result2 = self.get_parallel_blocks(single_results[2].asnumpy(), self.z_stra)[device_id%self.stra_size]
        reduce_result2 = reduce_results[2].asnumpy()
        assert np.allclose(single_result0, reduce_result0, 0.0001, 0.0001)
        assert np.allclose(single_result1, reduce_result1, 0.0001, 0.0001)
        assert np.allclose(single_result2, reduce_result2, 0.0001, 0.0001)
def test_reduce_grad():
    """Gradient check with the strategies fully covering the 4 devices
    (stra_size == 4, no repetition)."""
    inputx_shape = (32,64)
    inputy_shape = (64,64)
    inputz_shape = (64,32)
    fact = MatmulReduceFactory(inputx_shape, inputy_shape, inputz_shape, (1,4), (4,1), (1,4))
    fact.grad_cmp()
def test_reduce_grad_repeat():
    """Gradient check with stra_size == 2 on 4 devices, i.e. each shard is
    repeated on two devices (exercises the sens rescaling)."""
    inputx_shape = (32,64)
    inputy_shape = (64,64)
    inputz_shape = (64,32)
    fact = MatmulReduceFactory(inputx_shape, inputy_shape, inputz_shape, (1,2), (2,1), (1,2))
    fact.grad_cmp()
ace9c32e5e21f4c7fb2181d00a30202cc2917d2a | 2,686 | py | Python | opac/tests/services/holding/lend.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | [
"MIT"
] | 1 | 2020-11-26T05:25:46.000Z | 2020-11-26T05:25:46.000Z | opac/tests/services/holding/lend.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | [
"MIT"
] | null | null | null | opac/tests/services/holding/lend.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.utils import timezone
from opac.models.masters import Stock, User
from opac.models.transactions import Holding, Lending, Reservation
from opac.services.errors import LendingAlreadyExistsError
from opac.services.holding import HoldingLendService
class HoldingLendServiceSuccessTests(TestCase):
    """Happy-path tests for HoldingLendService: converting a holding into a
    lending for the same stock and user."""

    fixtures = ['masters_minimal']

    def test_holding_and_lending_counts(self):
        """Executing the service deletes the holding and creates a lending
        for the same stock/user."""
        stock = Stock.objects.get(pk=1)
        user = User.objects.get(pk=1)
        holding = Holding.objects.create(
            stock=stock,
            user=user,
            expiration_date=timezone.localdate()
        )
        self.assertEqual(Holding.objects.count(), 1)
        self.assertEqual(Lending.objects.count(), 0)
        HoldingLendService(holding).exec()
        self.assertEqual(Holding.objects.count(), 0)
        self.assertEqual(Lending.objects.count(), 1)
        lending = Lending.objects.get(pk=1)
        self.assertEqual(stock, lending.stock)
        self.assertEqual(user, lending.user)

    def test_has_no_effect_on_reservation(self):
        """A different user's reservation on the same stock is left intact
        when the holding is converted into a lending."""
        stock = Stock.objects.get(pk=1)
        user1 = User.objects.get(pk=1)
        user2 = User.objects.get(pk=2)
        holding = Holding.objects.create(
            stock=stock,
            user=user1,
            expiration_date=timezone.localdate()
        )
        reservation = Reservation.objects.create(
            stock=stock,
            user=user2
        )
        self.assertEqual(Holding.objects.count(), 1)
        self.assertEqual(Lending.objects.count(), 0)
        self.assertEqual(Reservation.objects.count(), 1)
        HoldingLendService(holding).exec()
        self.assertEqual(Holding.objects.count(), 0)
        self.assertEqual(Lending.objects.count(), 1)
        self.assertEqual(Reservation.objects.count(), 1)
        lending = Lending.objects.get(pk=1)
        self.assertEqual(stock, lending.stock)
        self.assertEqual(user1, lending.user)
        self.assertEqual(reservation, Reservation.objects.get(pk=1))
class HoldingLendServiceFailureTests(TestCase):
    """Failure-path tests for HoldingLendService."""

    fixtures = ['masters_minimal']

    def test_lending_already_exists(self):
        """The service refuses to lend a stock that is already lent to
        another user, raising LendingAlreadyExistsError."""
        stock = Stock.objects.get(pk=1)
        user1 = User.objects.get(pk=1)
        user2 = User.objects.get(pk=2)
        holding = Holding.objects.create(
            stock=stock,
            user=user1,
            expiration_date=timezone.localdate()
        )
        # Pre-existing lending of the same stock to a different user.
        Lending.objects.create(
            stock=stock,
            user=user2,
            due_date=timezone.localdate()
        )
        service = HoldingLendService(holding)
        self.assertRaises(LendingAlreadyExistsError, service.exec)
ace9c3e895d4bfcc43c1842ba9e476e0fbd9e86a | 4,800 | py | Python | web/transiq/enquiry/models.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | null | null | null | web/transiq/enquiry/models.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | 14 | 2020-06-05T23:06:45.000Z | 2022-03-12T00:00:18.000Z | web/transiq/enquiry/models.py | manibhushan05/transiq | 763fafb271ce07d13ac8ce575f2fee653cf39343 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core.validators import MinLengthValidator
from django.db import models
from utils.models import City, VehicleCategory
class DailyRateEnquiry(models.Model):
    """One daily freight-rate data point for a vehicle type on a
    loading-city -> unloading-city lane, reported by a transporter, broker
    or traffic person."""

    # Choices for source_of_information (db value, human label).
    rate_update_source = (
        ('transporter', 'Transporter'),
        ('supplier', 'Broker'),
        ('traffic_person', 'Traffic Person'),
    )
    name = models.CharField(max_length=70, blank=True, null=True)
    phone = models.CharField(max_length=13, validators=[MinLengthValidator(10)], blank=True, null=True)
    source_of_information = models.CharField(max_length=35, choices=rate_update_source, blank=True, null=True)
    type_of_vehicle = models.ForeignKey(VehicleCategory, blank=True, null=True, on_delete=models.CASCADE)
    loading_point = models.CharField(max_length=200, blank=True, null=True)
    loading_city = models.ForeignKey(City, related_name='loading_city', on_delete=models.CASCADE)
    unloading_point = models.CharField(max_length=200, blank=True, null=True)
    unloading_city = models.ForeignKey(City, related_name='unloading_city', on_delete=models.CASCADE)
    material = models.CharField(max_length=150, blank=True, null=True)
    weight = models.CharField(max_length=35, blank=True, null=True)
    rate = models.CharField(max_length=10, blank=True, null=True)
    timestamp = models.DateTimeField(blank=True, null=True, verbose_name="datetime")
    number_of_truck = models.CharField(max_length=5, blank=True, null=True)
    comment = models.TextField(blank=True, null=True)
    # Audit fields (creator/modifier + soft-delete flag).
    created_by = models.ForeignKey(User, null=True, on_delete=models.CASCADE, related_name="daily_rate_enquiry_created_by")
    changed_by = models.ForeignKey(User, null=True, on_delete=models.CASCADE, related_name="daily_rate_enquiry_changed_by")
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
    deleted = models.BooleanField(default=False)
    deleted_on = models.DateTimeField(null=True, blank=True)

    class Meta:
        verbose_name_plural = "Daily Rate Enquiry Record"

    def __str__(self):
        return '%s , %s , %s' % (self.name, self.phone, self.created_on)
class EnquiryForm(models.Model):
    """A general transport enquiry submitted with contact and lane details.

    Captures the requester's contact information, vehicle type, material,
    weight and the loading/unloading city pair, with soft-delete / audit
    bookkeeping.
    """

    # Contact details of the enquirer; only the name is mandatory.
    name = models.CharField(max_length=70)
    phone = models.CharField(max_length=17, validators=[MinLengthValidator(10)], blank=True, null=True)
    email = models.EmailField(max_length=50, blank=True, null=True)
    type_of_vehicle = models.ForeignKey(VehicleCategory, blank=True, null=True, on_delete=models.CASCADE)
    # Free-text pickup location plus the canonical City record.
    loading_point = models.CharField(max_length=200, blank=True, null=True)
    loading_city = models.ForeignKey(City, related_name='enquiry_loading_city', on_delete=models.CASCADE)
    # Free-text drop location plus the canonical City record.
    unloading_point = models.CharField(max_length=200, blank=True, null=True)
    unloading_city = models.ForeignKey(City, related_name='enquiry_unloading_city', on_delete=models.CASCADE)
    material = models.CharField(max_length=200, blank=True, null=True)
    # Weight is stored as a free-form string, not a numeric field.
    weight = models.CharField(max_length=35, blank=True, null=True)
    # Required date field; presumably the requested shipment date —
    # TODO(review): confirm against the form that posts here.
    date = models.DateField()
    # Date the enquiry was submitted; set once at creation.
    enquiry_date = models.DateField(auto_now_add=True, auto_now=False)
    comment = models.TextField(blank=True, null=True)
    # Audit trail: users who created / last changed the record.
    created_by = models.ForeignKey(User, null=True, on_delete=models.CASCADE, related_name="enquiry_form_created_by")
    changed_by = models.ForeignKey(User, null=True, on_delete=models.CASCADE, related_name="enquiry_form_changed_by")
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
    # Soft-delete: rows are flagged, not removed, and stamped with a time.
    deleted = models.BooleanField(default=False)
    deleted_on = models.DateTimeField(null=True, blank=True)

    class Meta:
        verbose_name_plural = "General Enquiry Record"

    def __str__(self):
        # Identify the record by enquirer name, phone and creation time.
        return '%s , %s , %s' % (self.name, self.phone, self.created_on)
class ContactUsLandingPage(models.Model):
    """A contact-us message submitted from the landing page.

    All submitted fields are optional free text; rows carry soft-delete /
    audit bookkeeping like the other enquiry models.
    """

    # Contact details as entered by the visitor (no validation beyond length;
    # note email is a plain CharField, not an EmailField).
    name = models.CharField(max_length=255, blank=True, null=True)
    phone = models.CharField(max_length=255, blank=True, null=True)
    email = models.CharField(max_length=255, blank=True, null=True)
    message = models.TextField(blank=True, null=True)
    # Audit trail: users who created / last changed the record.
    created_by = models.ForeignKey(User, null=True, on_delete=models.CASCADE, related_name="contact_us_landing_page_created_by")
    changed_by = models.ForeignKey(User, null=True, on_delete=models.CASCADE, related_name="contact_us_landing_page_changed_by")
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
    # Soft-delete: rows are flagged, not removed, and stamped with a time.
    deleted = models.BooleanField(default=False)
    deleted_on = models.DateTimeField(null=True, blank=True)

    class Meta:
        verbose_name_plural = "Contact Us Landing Page"

    def __str__(self):
        # Identify the record by sender name, phone and creation time.
        return "%s, %s, %s" % (self.name, self.phone, self.created_on)
| 53.932584 | 128 | 0.753542 |
ace9c40d28a9cbe0be48e8c03c305c68bcde8cab | 699 | py | Python | csob/resources/payment/process.py | druids/csob-paymentgateway | 64e5e84ad4a0239f716855e12ee70364ab4414df | [
"MIT"
] | null | null | null | csob/resources/payment/process.py | druids/csob-paymentgateway | 64e5e84ad4a0239f716855e12ee70364ab4414df | [
"MIT"
] | 3 | 2019-03-13T10:28:35.000Z | 2019-04-04T10:56:59.000Z | csob/resources/payment/process.py | druids/csob-paymentgateway | 64e5e84ad4a0239f716855e12ee70364ab4414df | [
"MIT"
] | 1 | 2019-03-11T02:32:14.000Z | 2019-03-11T02:32:14.000Z | from urllib.parse import urljoin
from csob.api_response import APIResponse
from csob.exceptions import GatewaySignatureInvalid
from . import PaymentCSOBResource
class PaymentProcessResource(PaymentCSOBResource):
    """Client for the CSOB ``payment/process/`` API resource.

    ``request_signature`` lists, in order, the payload fields that are
    covered by the request signature.
    """

    url = 'payment/process/'
    request_signature = ('merchantId', 'payId', 'dttm')

    def get(self, pay_id: str):
        """Build the base payload for *pay_id* and issue the GET request."""
        payload = self.get_base_json_with_pay_id(pay_id)
        return self._construct_url_and_get(payload)

    def parse_response_dict(self, response: dict):
        """Verify the gateway signature on *response* and wrap the result.

        Raises ``GatewaySignatureInvalid`` when verification returns False
        and ``self.raise_exception`` is set; otherwise returns an
        ``APIResponse`` carrying the verification outcome and raw data.
        """
        signature_ok = self.verify_signature(response)
        # Identity check against False is kept deliberately: only an
        # explicit False (not merely falsy) triggers the exception.
        if signature_ok is False and self.raise_exception:
            raise GatewaySignatureInvalid()
        return APIResponse(is_verified=signature_ok, parsed_data=response)
| 33.285714 | 82 | 0.759657 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.