"""TVM operator upsampling compute."""
from __future__ import absolute_import
import topi
from ..util import simplify
def upsampling(data, scale, layout="NCHW", method='nearest_neighbor', align_corners=False):
"""Perform upsampling on the data.
Nearest neighbor and bilinear upsampling are supported.
Parameters
----------
    data : tvm.Tensor
        4-D tensor with shape
        [batch, channel, in_height, in_width]
        or [batch, in_height, in_width, channel]
scale : int
Scaling factor
layout : string, optional
either "NCHW" or "NHWC"
method : {"bilinear", "nearest_neighbor", "bicubic"}
Method to be used for upsampling.
Returns
-------
output : tvm.Tensor
4-D with shape [batch, channel, in_height*scale, in_width*scale]
or [batch, in_height*scale, in_width*scale, channel]
"""
base_layout = layout[0:4]
if base_layout == "NCHW":
out_shape = (simplify(data.shape[2] * scale), simplify(data.shape[3] * scale))
elif layout == "NHWC":
out_shape = (simplify(data.shape[1] * scale), simplify(data.shape[2] * scale))
else:
        raise ValueError("layout {} is not supported yet".format(layout))
return topi.image.resize(data, out_shape, layout=layout,
method=method, align_corners=align_corners)
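# Usage sketch (a minimal example; assumes tvm is importable alongside topi,
# and the NCHW shape below is illustrative only):
#
#   import tvm
#   data = tvm.placeholder((1, 3, 32, 32), name="data")
#   out = upsampling(data, scale=2)   # output shape (1, 3, 64, 64)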
| {
"content_hash": "89c35b29cf53f4e2772b59480efd7bbe",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 91,
"avg_line_length": 33.65853658536585,
"alnum_prop": 0.6239130434782608,
"repo_name": "Huyuwei/tvm",
"id": "609213637cf458a9f34689890e12854a1f22e730",
"size": "2165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topi/python/topi/nn/upsampling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5569606"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6775044"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96633"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext as _
from taiga.base import exceptions as exc
from taiga.base import response
from taiga.base.api.viewsets import GenericViewSet
from taiga.base.utils import json
from taiga.projects.models import Project
from .exceptions import ActionSyntaxException
class BaseWebhookApiViewSet(GenericViewSet):
    # We don't want rest framework to parse the request body and transform it
    # into a dict in request.DATA, we need it raw
parser_classes = ()
# This dict associates the event names we are listening for
    # with their responsible classes (extending event_hooks.BaseEventHook)
event_hook_classes = {}
def _validate_signature(self, project, request):
        raise NotImplementedError
def _get_project(self, request):
project_id = request.GET.get("project", None)
try:
project = Project.objects.get(id=project_id)
return project
except Project.DoesNotExist:
return None
def _get_payload(self, request):
try:
payload = json.loads(request.body.decode("utf-8"))
except ValueError:
raise exc.BadRequest(_("The payload is not a valid json"))
return payload
def _get_event_name(self, request):
        raise NotImplementedError
def create(self, request, *args, **kwargs):
project = self._get_project(request)
if not project:
raise exc.BadRequest(_("The project doesn't exist"))
if not self._validate_signature(project, request):
raise exc.BadRequest(_("Bad signature"))
if project.blocked_code is not None:
raise exc.Blocked(_("Blocked element"))
event_name = self._get_event_name(request)
payload = self._get_payload(request)
event_hook_class = self.event_hook_classes.get(event_name, None)
if event_hook_class is not None:
event_hook = event_hook_class(project, payload)
try:
event_hook.process_event()
except ActionSyntaxException as e:
raise exc.BadRequest(e)
return response.NoContent()
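# A minimal sketch of a concrete subclass (the hook class and header name
# here are hypothetical; only event names present in event_hook_classes are
# dispatched):
#
#   class GitHubWebhookApiViewSet(BaseWebhookApiViewSet):
#       event_hook_classes = {"push": GithubPushEventHook}
#
#       def _validate_signature(self, project, request):
#           return True  # verify an HMAC of request.body in a real hook
#
#       def _get_event_name(self, request):
#           return request.META.get("HTTP_X_GITHUB_EVENT")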
| {
"content_hash": "7785e47eb3afeb72570db62b752d88e0",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 80,
"avg_line_length": 33.10769230769231,
"alnum_prop": 0.6514869888475836,
"repo_name": "curiosityio/taiga-docker",
"id": "6590832e5c7934debaade0d11fdd6e798b2ab6ab",
"size": "3064",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "taiga-back/taiga-back/taiga/hooks/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "186988"
},
{
"name": "JavaScript",
"bytes": "2007"
},
{
"name": "Nginx",
"bytes": "4140"
},
{
"name": "Python",
"bytes": "2793020"
},
{
"name": "Shell",
"bytes": "1392"
}
],
"symlink_target": ""
} |
import os
import pytest
import time
from multiprocessing import Process
from astropy import units as u
from pocs import POCS
from pocs import _check_config
from pocs import _check_environment
from pocs.utils import error
from pocs.utils.messaging import PanMessaging
@pytest.fixture
def pocs(config):
os.environ['POCSTIME'] = '2016-08-13 13:00:00'
pocs = POCS(simulator=['all'], run_once=True,
config=config,
ignore_local_config=True, db='panoptes_testing')
pocs.observatory.scheduler.fields_list = [
{'name': 'Wasp 33',
'position': '02h26m51.0582s +37d33m01.733s',
'priority': '100',
'exp_time': 2,
'min_nexp': 2,
'exp_set_size': 2,
},
]
yield pocs
pocs.power_down()
def test_simple_simulator(pocs):
assert isinstance(pocs, POCS)
def test_not_initialized(pocs):
assert pocs.is_initialized is not True
def test_run_without_initialize(pocs):
with pytest.raises(AssertionError):
pocs.run()
def test_initialization(pocs):
pocs.initialize()
assert pocs.is_initialized
def test_bad_pandir_env():
pandir = os.getenv('PANDIR')
os.environ['PANDIR'] = '/foo/bar'
with pytest.raises(SystemExit):
_check_environment()
os.environ['PANDIR'] = pandir
def test_bad_pocs_env():
pocs = os.getenv('POCS')
os.environ['POCS'] = '/foo/bar'
with pytest.raises(SystemExit):
_check_environment()
os.environ['POCS'] = pocs
def test_check_config1(config):
del config['mount']
with pytest.raises(SystemExit):
_check_config(config)
def test_check_config2(config):
del config['directories']
with pytest.raises(SystemExit):
_check_config(config)
def test_check_config3(config):
del config['state_machine']
with pytest.raises(SystemExit):
_check_config(config)
def test_make_log_dir():
log_dir = "{}/logs".format(os.getcwd())
assert os.path.exists(log_dir) is False
old_pandir = os.environ['PANDIR']
os.environ['PANDIR'] = os.getcwd()
_check_environment()
assert os.path.exists(log_dir) is True
os.removedirs(log_dir)
os.environ['PANDIR'] = old_pandir
def test_bad_state_machine_file():
with pytest.raises(error.InvalidConfig):
POCS.load_state_table(state_table_name='foo')
def test_load_bad_state(pocs):
with pytest.raises(error.InvalidConfig):
pocs._load_state('foo')
def test_default_lookup_trigger(pocs):
pocs.state = 'parking'
pocs.next_state = 'parking'
assert pocs._lookup_trigger() == 'set_park'
pocs.state = 'foo'
assert pocs._lookup_trigger() == 'parking'
def test_free_space(pocs):
assert pocs.has_free_space() is True
# Test something ridiculous
assert pocs.has_free_space(required_space=1e9 * u.gigabyte) is False
assert pocs.is_safe() is True
def test_is_dark_simulator(pocs):
pocs.initialize()
pocs.config['simulator'] = ['camera', 'mount', 'weather', 'night']
os.environ['POCSTIME'] = '2016-08-13 13:00:00'
assert pocs.is_dark() is True
os.environ['POCSTIME'] = '2016-08-13 23:00:00'
assert pocs.is_dark() is True
def test_is_dark_no_simulator_01(pocs):
pocs.initialize()
pocs.config['simulator'] = ['camera', 'mount', 'weather']
os.environ['POCSTIME'] = '2016-08-13 13:00:00'
assert pocs.is_dark() is True
def test_is_dark_no_simulator_02(pocs):
pocs.initialize()
pocs.config['simulator'] = ['camera', 'mount', 'weather']
os.environ['POCSTIME'] = '2016-08-13 23:00:00'
assert pocs.is_dark() is False
def test_is_weather_safe_simulator(pocs):
pocs.initialize()
pocs.config['simulator'] = ['camera', 'mount', 'weather']
assert pocs.is_weather_safe() is True
def test_is_weather_safe_no_simulator(pocs, db):
pocs.initialize()
pocs.config['simulator'] = ['camera', 'mount', 'night']
# Set a specific time
os.environ['POCSTIME'] = '2016-08-13 23:00:00'
# Insert a dummy weather record
db.insert_current('weather', {'safe': True})
assert pocs.is_weather_safe() is True
# Set a time 181 seconds later
os.environ['POCSTIME'] = '2016-08-13 23:05:01'
assert pocs.is_weather_safe() is False
def test_run_wait_until_safe(db):
os.environ['POCSTIME'] = '2016-08-13 23:00:00'
def start_pocs():
pocs = POCS(simulator=['camera', 'mount', 'night'],
messaging=True, safe_delay=15)
pocs.db.current.remove({})
pocs.initialize()
pocs.logger.info('Starting observatory run')
assert pocs.is_weather_safe() is False
pocs.send_message('RUNNING')
pocs.run(run_once=True, exit_when_done=True)
assert pocs.is_weather_safe() is True
pub = PanMessaging.create_publisher(6500)
sub = PanMessaging.create_subscriber(6511)
pocs_process = Process(target=start_pocs)
pocs_process.start()
# Wait for the running message
while True:
msg_type, msg_obj = sub.receive_message()
if msg_obj is None:
time.sleep(2)
continue
if msg_obj.get('message', '') == 'RUNNING':
time.sleep(2)
# Insert a dummy weather record to break wait
db.insert_current('weather', {'safe': True})
if msg_type == 'STATUS':
current_state = msg_obj.get('state', {})
if current_state == 'pointing':
pub.send_message('POCS-CMD', 'shutdown')
break
time.sleep(0.5)
pocs_process.join()
assert pocs_process.is_alive() is False
def test_unsafe_park(pocs):
pocs.initialize()
assert pocs.is_initialized is True
os.environ['POCSTIME'] = '2016-08-13 13:00:00'
assert pocs.state == 'sleeping'
pocs.get_ready()
assert pocs.state == 'ready'
pocs.schedule()
assert pocs.state == 'scheduling'
# My time goes fast...
os.environ['POCSTIME'] = '2016-08-13 23:00:00'
pocs.config['simulator'] = ['camera', 'mount', 'weather']
assert pocs.is_safe() is False
assert pocs.state == 'parking'
pocs.set_park()
pocs.clean_up()
pocs.goto_sleep()
assert pocs.state == 'sleeping'
def test_power_down_while_running(pocs):
assert pocs.connected is True
pocs.initialize()
pocs.get_ready()
assert pocs.state == 'ready'
pocs.power_down()
assert pocs.state == 'parked'
assert pocs.connected is False
def test_run_no_targets_and_exit(pocs):
os.environ['POCSTIME'] = '2016-08-13 23:00:00'
pocs.config['simulator'] = ['camera', 'mount', 'weather', 'night']
pocs.state = 'sleeping'
pocs.initialize()
assert pocs.is_initialized is True
pocs.run(exit_when_done=True)
assert pocs.state == 'sleeping'
def test_run(pocs):
os.environ['POCSTIME'] = '2016-09-09 08:00:00'
pocs.config['simulator'] = ['camera', 'mount', 'weather', 'night']
pocs.state = 'sleeping'
pocs._do_states = True
pocs.observatory.scheduler.add_observation({'name': 'KIC 8462852',
'position': '20h06m15.4536s +44d27m24.75s',
'priority': '100',
'exp_time': 2,
'min_nexp': 2,
'exp_set_size': 2,
})
pocs.initialize()
assert pocs.is_initialized is True
pocs.run(exit_when_done=True, run_once=True)
assert pocs.state == 'sleeping'
def test_run_interrupt_with_reschedule_of_target():
def start_pocs():
pocs = POCS(simulator=['all'], messaging=True)
pocs.logger.info('Before initialize')
pocs.initialize()
pocs.logger.info('POCS initialized, back in test')
pocs.observatory.scheduler.fields_list = [{'name': 'KIC 8462852',
'position': '20h06m15.4536s +44d27m24.75s',
'priority': '100',
'exp_time': 2,
'min_nexp': 1,
'exp_set_size': 1,
}]
pocs.run(exit_when_done=True, run_once=True)
pocs.logger.info('run finished, powering down')
pocs.power_down()
pub = PanMessaging.create_publisher(6500)
sub = PanMessaging.create_subscriber(6511)
pocs_process = Process(target=start_pocs)
pocs_process.start()
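    # Wait for the subprocess to reach the 'pointing' state, then ask it
    # to shut down; run() should reschedule the interrupted target first.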
while True:
msg_type, msg_obj = sub.receive_message()
if msg_type == 'STATUS':
current_state = msg_obj.get('state', {})
if current_state == 'pointing':
pub.send_message('POCS-CMD', 'shutdown')
break
pocs_process.join()
assert pocs_process.is_alive() is False
def test_run_power_down_interrupt():
def start_pocs():
pocs = POCS(simulator=['all'], messaging=True)
pocs.initialize()
pocs.observatory.scheduler.fields_list = [{'name': 'KIC 8462852',
'position': '20h06m15.4536s +44d27m24.75s',
'priority': '100',
'exp_time': 2,
'min_nexp': 1,
'exp_set_size': 1,
}]
pocs.logger.info('Starting observatory run')
pocs.run()
pocs_process = Process(target=start_pocs)
pocs_process.start()
pub = PanMessaging.create_publisher(6500)
sub = PanMessaging.create_subscriber(6511)
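    # Watch the child's status messages and issue a shutdown command once
    # it reaches 'pointing', exercising a power-down while running.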
while True:
msg_type, msg_obj = sub.receive_message()
if msg_type == 'STATUS':
current_state = msg_obj.get('state', {})
if current_state == 'pointing':
pub.send_message('POCS-CMD', 'shutdown')
break
pocs_process.join()
assert pocs_process.is_alive() is False
| {
"content_hash": "8a941f38424e336f4e854c5c2d223bf0",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 99,
"avg_line_length": 29.11898016997167,
"alnum_prop": 0.5684405097772157,
"repo_name": "joshwalawender/POCS",
"id": "84ac9209126f142a305ddce5e087eafd32f104d8",
"size": "10279",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pocs/tests/test_pocs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "13166"
},
{
"name": "Python",
"bytes": "503517"
},
{
"name": "Shell",
"bytes": "1804"
}
],
"symlink_target": ""
} |
"""
Django settings for blog project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# BASE_DIR = "/Users/jmitch/desktop/blog/src/"
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sm@g)(fbwdh5wc*xe@j++m9rh^uza5se9a57c5ptwkg*b@ki0x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third party
'crispy_forms',
'markdown_deux',
'pagedown',
# local apps
'comments',
'posts',
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
LOGIN_URL = "/login/"
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
#'/var/www/static/',
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_cdn")
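# During development (DEBUG=True), uploaded media can be served through the
# URLconf. A sketch for blog/urls.py (not part of this settings module):
#
#   from django.conf import settings
#   from django.conf.urls.static import static
#   urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)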
| {
"content_hash": "45e7bac23b0b2a84b0407347fec2ac3c",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 91,
"avg_line_length": 23.445859872611464,
"alnum_prop": 0.6791632708503124,
"repo_name": "codingforentrepreneurs/Advancing-the-Blog",
"id": "043e39384ea885c232c133b2eef642934ff14fb4",
"size": "3681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blog/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52162"
},
{
"name": "HTML",
"bytes": "15634"
},
{
"name": "JavaScript",
"bytes": "321792"
},
{
"name": "Python",
"bytes": "31596"
}
],
"symlink_target": ""
} |
import sys
import subprocess
import shutil
import os
import signal
import stat
import time
# required for iscsi
# Use simplejson or Python 2.6 json, prefer simplejson.
try:
import simplejson as json
except ImportError:
import json
import urllib2
import ftplib
import mimetypes
import bz2
import tarfile
# used for random filenames...
import random
import base64
# FilePath abstraction
from twisted.python.filepath import FilePath
# Configuration parsing
import ConfigParser
import logging
LOG_FILENAME = '/tmp/'+os.path.basename(sys.argv[0])+'.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
class LogDevice():
    def write(self, msg):
        print >>sys.stdout, msg
        logging.debug(msg)
## Block Ctrl-C and other naughty signals - All resistance is futile!
#signal.signal(1,None)
#signal.signal(2,None)
#signal.signal(15,None)
# REMOVE THESE GLOBALS!
# Each takes one argument via format, {0} containing the
# device
MKFS={
'ext3': ('mkfs.ext3','-F','-q','{device}'),
'reiserfs': ('mkfs.reiserfs','{device}'),
'xfs': ('mkfs.xfs','-f','{device}'),
'swap': ('mkswap','{device}'),
'keep': ('true','{device}')
}
def fail(msg):
print >>sys.stderr, msg
print >>sys.stderr, "\n"
sys.exit(1)
# Or-Die (do or die!)
# Execute arguments. Take last argument as error string
def ordie (*args):
try:
        return subprocess.check_call(*args[:-1])
except:
return fail (args[-1])
# wstring simply writes a string to new file
def wstring(string,filename):
# Right teh filez LOL -KTHXBYE, LOLCATZ
tehfile=open(filename,'w')
tehfile.write(string)
tehfile.close()
# astring appends a string to an existing file
def astring(string,filename):
# Right teh filez LOL -LOLCATZ
tehfile=open(filename,'a')
# Pydocs say both that this should be a no-op
# BUT also say that some systems will not seek on their own?
# we're just being careful here...
tehfile.seek(0,os.SEEK_END)
tehfile.write(string)
tehfile.close()
def is_regularf(file):
# If not string, probably a FilePath
if type(file) == type(''):
path = file
else:
path = os.path.join(file.dirname(),file.basename())
if os.path.islink(path):
path=os.path.join(os.path.dirname(path), os.readlink(path))
try:
si=os.stat(path)
except OSError:
return False
# si[0] should contain st_mode, required by S_ISREG
if not stat.S_ISREG(si[0]):
return False
return True
# Sets class variables by args
def cvarargs (cls,cvars,**kwargs):
for key in kwargs:
if cvars.count(key) > 0:
# Assign variable to key where key is an
# allowed class variable
print("cls.{0}=kwargs[key] ({1})".format(key,kwargs[key]))
            setattr(cls, key, kwargs[key])  # eval() cannot perform an assignment
pass
class Disk:
def __init__(self,**kwargs):
# Allowed class variables
data=kwargs
self.size=data['size']
self.location=data['location']
self.mntpnt=data['mntpnt']
self.ftype=data.has_key('ftype') and data['ftype'] or None
self.wipe=data['wipe']
self.volname=data['volname']
self.wipesrc=data['wipesrc']
self.method=data['method']
self.partition=data.has_key('partition') and data['partition'] or None
self.domount=data['mount']
self.mountoptions=data['options']
self.guest_name=data['guest_name']
self.dpathsuffix=None
self.real_mntpnt=None
self.guestdev=data['dev']+"1"
self.guestrdev=self.guestdev
if self.partition:
self.set_partitioned()
self.guestrdev=data['dev']
self._devpath=None
pass
def fstab(self):
mntcnt=0
if self.domount:
mntcnt=1
line="{0}\t{1}\t{2}\t{3}\t{4}\t{5}".format(
"/dev/"+self.guestdev,
self.mntpnt,
self.ftype,
self.mountoptions,
0,
mntcnt
)
return line
def devpath(self):
if self._devpath:
return self._devpath
dpath=''
if self.method=='LVM':
logging.debug("LVM device.")
dpath="/dev/mapper/{0}-{1}".format(self.location,self.volname)
elif self.method=='iSCSI':
logging.debug("iSCSI device.")
req=urllib2.urlopen(
"http://{0}:8080/iscsitadm/target/array002/{1}".format(
self.location, self.volname
)
).read()
diskinfo=json.loads(req)
if type(diskinfo) is dict:
iqn=diskinfo['array002/{0}'.format(self.volname)]['iSCSI Name']
if not iqn:
return None
dpath="/dev/disk/by-path/ip-{0}:3260-iscsi-{1}-lun-0".format(
self.location,iqn
)
if os.path.islink(dpath):
# Lets get rid of the symlinks and
# pretty up the display of the path we churn out
dpath=os.path.abspath(
os.path.join(os.path.dirname(dpath),
os.readlink(dpath))
)
if not self.check_exists(dpath):
sp1=subprocess.Popen(
('iscsiadm', '-m', 'discovery', '-t', 'sendtargets',
'-p', self.location),
stderr=subprocess.PIPE,stdout=subprocess.PIPE
)
sp1.wait()
sp2=subprocess.Popen(
('iscsiadm', '-m', 'node', '-l', '-T', iqn,
'-p', "{0}:3260".format(self.location)),
stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
sp2.wait()
logging.debug("Waiting for iSCSI disk to initialize...")
time.sleep(5)
else:
return None
else:
return fail("Disk method invalid")
if self.dpathsuffix:
dpath="{0}{1}".format(dpath,self.dpathsuffix)
if not self.check_exists(dpath):
logging.debug("File not found - giving up (path: {0}".format(dpath))
return None
self._devpath=dpath
return dpath
def check_exists(self,path):
if os.path.islink(path):
path=os.path.join(os.path.dirname(path), os.readlink(path))
try:
si=os.stat(path)
except OSError:
return False
# si[0] should contain st_mode, required by S_ISBLK
if not stat.S_ISBLK(si[0]):
return False
return True
def create(disk):
if disk.devpath():
return False
# Create disks.
try:
print >>sys.stderr, "Creating disk.\n"
ex=('/etc/cloudinf/disk.d/{0}'.format(disk.method),
disk.size,
disk.volname,
disk.location )
sp=subprocess.Popen(
ex, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
soo=sp.communicate()
sp.wait()
except:
fail ("Could not create disk.")
if not disk.devpath():
fail ("Disk does not exist. Cannot continue.")
# Wiping is provided as a security measure to prevent
# data exposure. Simply zero'ing blocks is sufficient
# unless users have physical access to the device.
if disk.wipe > 0:
print >>sys.stderr, "Wiping block device (may take a while)\n"
sp0=subprocess.Popen(
('dd',"if={0}".format(disk.wipesrc),'bs=8M'),
stdout=subprocess.PIPE,stderr=sys.stdout
)
sp1=subprocess.Popen(
            ('pv',),
stdin=sp0.stdout, stdout=subprocess.PIPE, stderr=sys.stdout
)
sp2=subprocess.Popen(
('dd','of={0}'.format(disk.devpath()), 'bs=8M'),
stdin=sp1.stdout,stderr=sys.stdout
)
sp0.wait()
sp1.wait()
sp2.wait()
disk.wipe-=1
def set_partitioned(disk):
devpath=disk.devpath()
if not devpath:
disk.create()
devpath=disk.devpath()
# toss it into the device manager (multipath required)
ordie(("kpartx","-a","{0}p1".format(devpath)),"Kpartx failed")
# change the device path
disk.dpathsuffix="p1"
def format(disk):
global MKFS
if disk.is_mounted():
return False
        if disk.exported():
return False
devpath=disk.devpath()
if not devpath:
disk.create()
devpath=disk.devpath()
if not disk.partition:
disk.partition='N'
        if disk.partition.upper() != 'Y':
pass
else:
# make partition
ordie(("parted","-s",devpath,"mklabel","msdos"),"Mklabel failed")
ordie(
("parted","-s",devpath,"mkpart","primary","0",disk.partition),
"Mkpart failed"
)
# toss it into the device manager (multipath required)
ordie(("kpartx","-a","{0}p1".format(devpath)),"Kpartx failed")
# change the device path
disk.dpathsuffix="p1"
        if disk.ftype not in MKFS:
fail ('Filesystem choice invalid.')
#print "Building filesystem."
fscmd=map(lambda x: x.format(device=devpath), MKFS[disk.ftype])
sp=subprocess.Popen(fscmd,stdout=sys.stdout)
# Block return until format complete
return sp.wait()
def is_mounted(self,mntpnt=None,parent=None):
mntpnt=mntpnt or self.real_mountpoint(mntpnt,parent)
if os.path.ismount(mntpnt):
# Already mounted
return mntpnt
else:
return False
def real_mountpoint(self,mntpnt=None,parent=None):
if self.real_mntpnt:
return self.real_mntpnt
if mntpnt is None:
mntpnt=self.mntpnt
if parent is None:
parent=os.path.join("/mnt/",self.volname.strip('/'))
mntpnt=os.path.join(parent,mntpnt.strip('/'))
self.real_mntpnt = mntpnt
return mntpnt
def exported(self):
sp0=subprocess.Popen(
("xm", "list", self.guest_name),
stderr=subprocess.PIPE
)
if sp0.wait() == 0:
return True
return False
# Optionally accept parent argument to mount under a sub-dir.
def mount(self,mntpnt=None,parent=None):
if not self.domount:
return None
devpath=self.devpath()
if not self.check_exists(devpath):
fail ("Disk does not exist. Cannot continue.")
mntpnt=self.real_mountpoint(mntpnt,parent)
if self.is_mounted():
# Already mounted
return mntpnt
if self.exported():
fail ("Xen guest running.")
# mkdir
try:
            os.mkdir(mntpnt,0750)  # mode must be octal
except OSError:
pass
# mount
ex=("mount",devpath,mntpnt)
sp=subprocess.Popen(
("mount", devpath, mntpnt), stdout=sys.stdout,stderr=sys.stderr
)
if sp.wait() != 0:
fail ("Mount fail.")
return mntpnt
def umount(self,mntpnt=None,parent=None):
if not self.is_mounted():
return False
mntpnt=self.real_mountpoint(mntpnt,parent)
sp=subprocess.Popen(
("umount", mntpnt), stdout=sys.stdout, stderr=sys.stderr
)
if sp.wait() != 0:
# if failure, we kill processes and try again.
# we don't want to kill processes if at all possible,
# so this is only done as a last-resort
# Must chdir out of the mntpnt, if necessary...
os.chdir("/tmp")
subprocess.call(("fuser","-k","-9","-c",mntpnt))
subprocess.check_call(("sync"))
subprocess.check_call(("sync"))
subprocess.call(("umount",mntpnt))
# Second time a charm
sp=subprocess.Popen(
("umount",mntpnt),stdout=sys.stdout,stderr=sys.stderr
)
if sp.wait() != 0:
print >>sys.stderr, "umount error."
raise
return True
def enumerate(self):
return {
'-class': self.method,
'-dev': self.guestdev,
'-rdev': self.guestrdev,
'-path': self.devpath(),
'-size': self.size,
'-location': self.location,
'-mntpnt': self.mntpnt,
'-fstype': self.ftype,
'-wipe': self.wipe,
'-volname': self.volname,
'-wipesrc': self.wipesrc,
'-partition': self.partition,
'-domount': self.domount,
'-mountoptions': self.mountoptions,
'-dpathsuffix': self.dpathsuffix,
}
# Basic Time class
class Time(object):
    @staticmethod
    def seconds(cnt):
        return cnt
    @staticmethod
    def minutes(cnt):
        return 60*cnt
    @staticmethod
    def hours(cnt):
        return Time.minutes(60)*cnt
    @staticmethod
    def hour():
        return Time.hours(1)
    @staticmethod
    def days(cnt):
        return Time.hours(24)*cnt
    @staticmethod
    def months(cnt):
        return int(Time.days(1)*30.5*cnt)
    @staticmethod
    def years(cnt):
        return Time.months(12)*cnt
import traceback
import pickle
# Define a forker!
# A good plan when doing a chroot or such...
class Fork(object):
def __init__ (self, timeout=None):
self.timeout = timeout
def __call__(self,f):
def fork_wrapper(*args):
def timeout(signum,frame):
raise IOError(
'Took longer than {0} seconds!'.format(self.timeout)
)
# Yes, the variable names are cute, but shouldn't be
# distracting...
# fifo is a fifo, jack is our pid,
# fee is the client's fifo-fh.
# fum is the server's fifo-fh.
rnd=base64.urlsafe_b64encode(str(random.getrandbits(16)))
filename='/tmp/'+os.path.basename(sys.argv[0])+'.'+rnd+'.ipc'
fifo=os.mkfifo(filename)
jack=os.fork()
if jack == 0:
try:
fee=open(filename,'wb')
result=f(*args)
# pickle arg[2] is negative, for highest version
# otherwise get version 0 & unicode error
pickle.dump(result,fee,-1)
fee.flush()
fee.close()
except:
traceback.print_exc()
os._exit(1)
os._exit(0)
if self.timeout:
signal.signal(signal.SIGALRM, timeout)
signal.alarm(self.timeout)
fum=open(filename, 'rb')
jackret=fum.read()
cexit=os.waitpid(jack,0)
if self.timeout:
signal.alarm(0)
os.unlink(filename)
if len(jackret) == 0:
                return cexit[1] == 0  # waitpid returns (pid, status)
else:
return pickle.loads(jackret)
return fork_wrapper
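# Usage sketch for the decorator above: the wrapped function runs in a
# forked child (useful around os.chroot) and its pickled return value is
# read back through the FIFO; IOError is raised if the timeout elapses.
#
#   @Fork(timeout=60)
#   def peek(path):
#       os.chroot(path)
#       return os.listdir('/')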
def do_format(fs=None,fschoice=None):
global dsklst
rootmounted=False
# Format and mount disks
if fs:
disks={ fs : dsklst[fs] }
else:
disks=dsklst
# Check that NONE are mounted first
for mntpnt,disk in disks.items():
if disk.is_mounted():
return False
for mntpnt,disk in disks.items():
if not disk.ftype:
disk.ftype = fschoice
disk.format()
return True
@Fork(timeout=3600)
def do_debootstrap(suite,distro=None,arch=None,mirror=None):
mntpnt=dsklst['/'].mount()
if not dsklst['/'].is_mounted():
return False
arch = arch or 'amd64'
distro = distro or {
'lenny': 'debian',
'etch': 'debian',
'dapper': 'ubuntu',
'hardy': 'ubuntu',
'jaunty': 'ubuntu',
'karmic': 'ubuntu',
'lucid': 'ubuntu',
}[suite]
mirror = mirror or {
'debian': 'ftp://ftp.grokthis.net/debian',
'ubuntu': 'ftp://ftp.grokthis.net/ubuntu',
}[distro]
if distro=='debian':
subprocess.call(
('debootstrap','--arch',arch,suite,mntpnt,mirror),stdout=sys.stdout)
elif distro=='ubuntu':
subprocess.call(
('debootstrap', '--no-resolve-deps', '--exclude=console-setup',
'--arch',arch,suite,mntpnt,mirror),stdout=sys.stdout
)
else:
fail("Unknown distribution. Pass 'distro' option to debootstrap")
def do_fstab(part=None):
global dsklst
if part:
return dsklst[part].fstab()
return [ dsklst[x].fstab() for x in dsklst ]
# Extract a tarball
@Fork(timeout=1800)
def do_extract(dest, file):
uh=open(file)
tf=tarfile.open(mode='r|*',fileobj=uh)
    tf.extractall(dest)  # extract into the requested destination
@Fork(timeout=1800)
def do_sed(rxp, file):
fp=os.path.join(file.dirname(),file.basename())
    sp=subprocess.Popen(
        ("sed","-n","-e",rxp,fp),stdout=subprocess.PIPE,stderr=sys.stderr
    )
    out,_=sp.communicate()
    return out
@Fork(timeout=1800)
def do_urlextract(dest, url):
global dsklst
dest=FilePath(dest)
# Don't do this if not mounted!
mntpnt=dsklst['/'].real_mountpoint()
if not os.path.ismount(mntpnt):
return False
if not dest.isdir():
return False
try:
uh=urllib2.urlopen(url)
tf=tarfile.open(mode='r|*',fileobj=uh)
os.chroot(mntpnt)
os.chdir(os.path.join(dest.dirname(),dest.basename()))
tf.extractall()
except:
traceback.print_exc()
os.chdir('/')
def do_getDisks():
global dsklst
return [ dsklst[k].enumerate() for k in dsklst ]
@Fork(timeout=1800)
def do_rawriteurl(url):
global dsklst
# Don't do this if mounted!
mntpnt=dsklst['/'].real_mountpoint()
if os.path.ismount(mntpnt):
return False
ddof=open(dsklst['/'].devpath(),'w+b')
uh=urllib2.urlopen(url)
tf=tarfile.open(mode='r|*',fileobj=uh)
    ddif=tf.extractfile(tf.next())  # next() returns the first member
    while True:
        buf=ddif.read(4096)
        if not buf:
            break
        ddof.write(buf)
    ddof.flush()
    ddof.close()
@Fork(timeout=1800)
def do_mount(path):
global dsklst
try:
if dsklst[path].mount():
return True
except:
pass
return False
@Fork(timeout=1800)
def do_umount(path):
global dsklst
try:
dsklst[path].umount()
except:
pass
return not dsklst[path].is_mounted()
@Fork(timeout=1800)
def do_peekfs(cmd,path,*args):
global dsklst
# wstring simply writes a string to new file
def _wstring(fp):
def _wrap(path,string):
# Right teh filez LOL -KTHXBYE, LOLCATZ
tehfile=path.open('w')
tehfile.write(string)
tehfile.close()
return lambda *args: _wrap(fp,*args)
def _wget(fp):
def _wrap(path,url):
req=urllib2.urlopen(url).read()
tehfile=path.open('wb')
tehfile.write(req)
tehfile.close()
return lambda *args: _wrap(fp,*args)
    # _astring appends a string to an existing file
def _astring(fp):
def _wrap(path,string):
tehfile=path.open('a')
# Pydocs say both that this should be a no-op
# BUT also say that some systems will not seek on their own?
# we're just being careful here...
tehfile.seek(0,os.SEEK_END)
tehfile.write(string)
tehfile.close()
return lambda *args: _wrap(fp,*args)
# Templating engine
def _template(fp):
def _wrap(path,**template):
            scratchfile=os.path.join(path.dirname(),"."+path.basename()+".tmp")
fh=path.open('r')
sfp=FilePath(scratchfile)
sfh=sfp.open('w')
seeklast=0
            for line in fh.readlines():
                sfh.write(line.format(**template))
sfh.flush()
sfh.close()
fh.close()
sfp.moveTo(path.realpath())
return lambda *args: _wrap(fp,*args)
def _sed(fp):
return lambda rxp: do_sed(rxp,fp)
def _urlextract(fp):
return lambda url: do_urlextract(fp,url)
def _extract(fp):
return lambda *args: do_extract(fp,*args)
def _moveTo(fp):
return lambda path: fp.moveTo(FilePath(path))
def _copyTo(fp):
return lambda path: fp.copyTo(FilePath(path))
def _ls(fp):
        def _wrap(fp,glob="*"):
            return map(lambda f: f.basename(), fp.globChildren(glob))
return lambda *args: _wrap(fp,*args)
def _b64get(fp):
return lambda: base64.b64encode(fp.getContent())
def _b64put(fp):
def _wrap(path,content):
            if path.exists() and not is_regularf(path):
return False
return path.setContent(base64.b64decode(content))
return lambda *args: _wrap(fp,*args)
mntpnt=dsklst['/'].real_mountpoint()
# Make sure the user mounts us, don't auto-mount
if not os.path.ismount(mntpnt):
return False
os.chdir(mntpnt)
os.chroot(mntpnt)
pp=FilePath('/')
# We're safe /w preauthChild since we're chroot'ed
fp=pp.preauthChild(path)
"""
Mapping the t.p.f.FilePath methods
    which we will allow, to human names
    that can be accessed via the cmd arg
"""
return {
'chmod': fp.chmod,
'getsize': fp.getsize,
'exists': fp.exists,
'isdir': fp.isdir,
'isfile': fp.isfile,
'islink': fp.islink,
'isabs': fp.isabs,
#'listdir': fp.listdir,
'ls': fp.listdir,
'dir': fp.listdir,
'splitext': fp.splitext,
'touch': fp.touch,
'rm': fp.remove,
'makedirs': fp.makedirs,
'basename': fp.basename,
'dirname': fp.dirname,
'parent': fp.parent,
'mkdir': fp.createDirectory,
'sed': _sed(fp),
'cp': _copyTo(fp),
'mv': _moveTo(fp),
'append': _astring(fp),
'put': _wstring(fp),
'apply_template': _template(fp),
#'wget': _wget(fp),
#'urlextract': _urlextract(fp),
#'extract': _extract(fp),
'get': fp.getContent,
'b64get': _b64get(fp),
'b64put': _b64put(fp),
#'ls': _ls(fp),
#'mknod': _mknod(fp)
}[cmd](*args)
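# Usage sketch (the guest root must already be mounted): do_peekfs chroots
# into the guest filesystem and dispatches a single FilePath operation:
#
#   do_peekfs('ls', '/etc')                        # list a guest directory
#   do_peekfs('put', '/etc/hostname', 'guest01\n') # write a guest file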
dsklst={}
def main(argv=None):
argv = argv or sys.argv
global dsklst
# Receive input via stdin & json
jsonargs=json.load(sys.stdin)
client=jsonargs['client']
cmdargs=jsonargs['cmd']
cmd=cmdargs.pop(0)
partitioned=False
if client.has_key('disk_partitioned') and client['disk_partitioned'] == 1:
partitioned=True
cp=ConfigParser.RawConfigParser()
cp.read('/etc/cloudinf/config.ini')
r_driver=cp.get('root','driver')
r_method=cp.get(r_driver,'method')
r_location=cp.get(r_driver,'location')
s_driver=cp.get('swap','driver')
s_method=cp.get(s_driver,'method')
s_location=cp.get(s_driver,'location')
# My shift from a struct to a class-based system...
dsklst={
'/': Disk(
method=r_method,
size=client['block_storage'],
location=r_location,
mntpnt= '/',
mount=True,
#ftype= fschoice.lower(),
wipe= 0,
volname= client['username'],
wipesrc= '/dev/zero',
#partition= partchoice,
partition=partitioned,
dev=client['disk_namespace']+"a",
options="defaults,noatime",
guest_name= client['username'],
),
'swap': Disk(
method=s_method,
size= '{0}M'.format(int(client['memory'])*2),
location=s_location,
mntpnt= 'none',
mount=False,
ftype= 'swap',
wipe= 0,
volname= "{0}swap".format(client['username']),
wipesrc= '/dev/zero',
partition = False,
dev=client['disk_namespace']+"b",
options="defaults",
guest_name= client['username'],
)
}
cmdtable={
#'putf': wstring,
#'appendf': astring,
#'extract': do_extract,
'fstab': do_fstab,
'urlextract': do_urlextract,
#'rawrite': do_rawriteurl,
'mkfs': do_format,
'debootstrap': do_debootstrap,
'peekfs': do_peekfs,
'mount': do_mount,
'umount': do_umount,
'_getdisks': do_getDisks,
}
# Sanity checks...
if client['username'].find("..") != -1:
fail ("Guest '{0}' specified is invalid".format(client['username']))
if client['username'].find("/") != -1:
fail ("Guest '{0}' specified is invalid".format(client['username']))
instdir=os.path.join("/mnt/",client['username'])
# Call given command & print json out
json.dump(cmdtable[cmd](*cmdargs),sys.stdout)
sys.exit(0)
if __name__ == "__main__":
sys.exit(main())
| {
"content_hash": "2eef5b0297d5ef8d1ab8fbc914711618",
"timestamp": "",
"source": "github",
"line_count": 896,
"max_line_length": 80,
"avg_line_length": 28.395089285714285,
"alnum_prop": 0.5384403741844195,
"repo_name": "ewindisch/nodestored",
"id": "f779607df5b7366de7fe30bd47494df411dcadf9",
"size": "26244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nodestored.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26244"
}
],
"symlink_target": ""
} |
from pyxb_114.bundles.opengis.citygml.raw.building import *
| {
"content_hash": "aa35200a73c1564daf219543b00c1d94",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 59,
"avg_line_length": 60,
"alnum_prop": 0.8166666666666667,
"repo_name": "msherry/PyXB-1.1.4",
"id": "30b9b4dc3726db53830cbdfdeea5929e3b4d94dd",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyxb_114/bundles/opengis/citygml/building.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6307"
},
{
"name": "Python",
"bytes": "1521054"
},
{
"name": "Shell",
"bytes": "23730"
}
],
"symlink_target": ""
} |
from rest_framework.renderers import JSONRenderer
from djangorestframework_camel_case.util import camelize
class CamelCaseJSONRenderer(JSONRenderer):
def render(self, data, *args, **kwargs):
return super(CamelCaseJSONRenderer, self).render(camelize(data), *args,
**kwargs) | {
"content_hash": "686374a00cc8c9abf7404bd5851fb142",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 79,
"avg_line_length": 42.875,
"alnum_prop": 0.6501457725947521,
"repo_name": "virtusize/djangorestframework-camel-case",
"id": "13c36bdb36a13afcd70fd464601b7981f9895e00",
"size": "367",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djangorestframework_camel_case/render.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "991"
},
{
"name": "Python",
"bytes": "8156"
}
],
"symlink_target": ""
} |
import unittest
from biicode.common.model.brl.brl_block import BRLBlock
from biicode.common.model.brl.cell_name import CellName
from biicode.common.model.symbolic.block_version import BlockVersion
from biicode.common.model.symbolic.reference import Reference, References
import copy
class ReferenceTest(unittest.TestCase):
def test_name(self):
m = BRLBlock('owner/user/block/branch')
mv = BlockVersion(m, 3)
r = Reference(mv, CellName('path/to/file.h'))
self.assertEqual('user/block/path/to/file.h', r.block_cell_name())
def test_deepcopy(self):
r = References()
r[BlockVersion(BRLBlock('user/user/block/branch'), 3)].add(CellName('f1.h'))
r2 = copy.deepcopy(r)
self.assertEqual(r, r2)
def test_references(self):
r = References()
bv3 = BlockVersion(BRLBlock('user/user/block/master'), 3)
bv4 = BlockVersion(BRLBlock('user/user/block/master'), 4)
cn0 = CellName('foo.h')
cn1 = CellName('foo1.h')
r[bv3].add(cn0)
r[bv3].add(cn1)
r[bv4].add(cn0)
l = r.explode()
self.assertEqual({(bv3, cn0), (bv3, cn1), (bv4, cn0)}, set(l))
# r.discard(Reference(bv4, cn0))
# l=r.explode()
# self.assertEqual({(bv3,cn0), (bv3,cn1)},set(l))
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| {
"content_hash": "8fdbbf6143756835eb1ec8e19576eadf",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 84,
"avg_line_length": 33.476190476190474,
"alnum_prop": 0.620199146514936,
"repo_name": "franramirez688/common",
"id": "4d8fad2d12da2ff24b7b937ee509324558f1e046",
"size": "1406",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "test/model/symbolic/reference_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3157300"
},
{
"name": "C++",
"bytes": "4667113"
},
{
"name": "CMake",
"bytes": "25379"
},
{
"name": "FORTRAN",
"bytes": "3691"
},
{
"name": "Java",
"bytes": "4201"
},
{
"name": "JavaScript",
"bytes": "172849"
},
{
"name": "Makefile",
"bytes": "6333"
},
{
"name": "Objective-C",
"bytes": "826"
},
{
"name": "Python",
"bytes": "714678"
},
{
"name": "Shell",
"bytes": "645"
}
],
"symlink_target": ""
} |
from fun_views.patterns.update.render import update_render_pattern
from fun_views.views.utils import (get_context_base, make_base_view,
not_set_get_form_class, not_set_get_obj,
not_set_get_template_name, prefer_func,
prefer_literal, render_response_base)
update_render_base = make_base_view(update_render_pattern)
def _init_form(req_data, form_class, obj):
return form_class(instance=obj)
def update_render(obj=None,
get_obj=not_set_get_obj,
form_class=None,
get_form_class=not_set_get_form_class,
init_form=_init_form,
obj_context_name='obj',
get_obj_context_name=None,
form_context_name='form',
get_form_context_name=None,
get_context=get_context_base,
template_name=None,
get_template_name=not_set_get_template_name,
render_response=render_response_base):
return update_render_base(
prefer_literal(obj, get_obj),
prefer_literal(form_class, get_form_class),
init_form,
prefer_func(obj_context_name, get_obj_context_name),
prefer_func(form_context_name, get_form_context_name),
get_context,
prefer_literal(template_name, get_template_name),
render_response
)
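# A minimal usage sketch (Article, ArticleForm and the template path are
# hypothetical; the exact req_data interface seen by get_obj is assumed):
#
#   article_update = update_render(
#       get_obj=lambda req_data: Article.objects.first(),
#       form_class=ArticleForm,
#       template_name='articles/update.html',
#   )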
| {
"content_hash": "a046119a4c18a4439db2abc00d363388",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 39.54054054054054,
"alnum_prop": 0.5645933014354066,
"repo_name": "keithasaurus/django_fun_views",
"id": "d20de46851b4741b832e367c89ee174703d08231",
"size": "1463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fun_views/views/generic/update/render.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80355"
}
],
"symlink_target": ""
} |
import redis
import time
from bigdl.serving.schema import *
import httpx
import json
import uuid
from bigdl.serving.log4Error import invalidInputError
RESULT_PREFIX = "cluster-serving_"
def http_json_to_ndarray(json_str):
    # currently no HTTP user uses batch predict, so batch is not implemented here
    # to add batch predict, replace the 0 index with [0, batch_size)
res_dict = json.loads(json.loads(json.loads(json_str)["predictions"][0])['value'])
data, shape = res_dict['data'], res_dict['shape']
array = np.array(data)
array = array.reshape(shape)
return array
def http_response_to_ndarray(response):
if response.status_code == 200:
response_str = response.text
return http_json_to_ndarray(response_str)
elif response.status_code == 400:
print("Invalid input format, valid example:")
print("""{
"instances": [
{
"tag": "foo",
"signal": [1, 2, 3, 4, 5],
"sensor": [[1, 2], [3, 4]]
}
]
}
""")
else:
print("Error when calling Cluster Serving Http server, error code:", response.status_code)
print("WARNING: Server returns invalid response, so you will get []")
return "[]"
def predict(frontend_url, request_str):
httpx.post(frontend_url + "/predict", data=request_str)
class API:
"""
base level of API control
select data pipeline here, Redis/Kafka/...
interface preserved for API class
"""
def __init__(self, host=None, port=None, name="serving_stream"):
self.name = name
self.host = host if host else "localhost"
self.port = port if port else "6379"
self.db = redis.StrictRedis(host=self.host,
port=self.port, db=0)
try:
self.db.xgroup_create(name, "serving")
except Exception:
print("redis group exist, will not create new one")
class InputQueue(API):
def __init__(self, frontend_url=None, **kwargs):
super().__init__(**kwargs)
self.frontend_url = frontend_url
if self.frontend_url:
# frontend_url is provided, using frontend
try:
res = httpx.get(frontend_url)
if res.status_code == 200:
httpx.PoolLimits(max_keepalive=1, max_connections=1)
self.cli = httpx.Client()
print("Attempt connecting to Cluster Serving frontend success")
else:
invalidInputError(False, "connection error")
except Exception as e:
print("Connection error, please check your HTTP server. Error msg is ", e)
else:
self.output_queue = OutputQueue(**kwargs)
# TODO: these params can be read from config in future
self.input_threshold = 0.6
self.interval_if_error = 1
def predict(self, request_data, timeout=5):
"""
:param request_data:
:param time_sleep:
:return:
"""
def json_to_ndarray_dict(json_str):
ndarray_dict = {}
data_dict = json.loads(json_str)['instances'][0]
for key in data_dict.keys():
ndarray_dict[key] = np.array(data_dict[key])
return ndarray_dict
if self.frontend_url:
response = self.cli.post(self.frontend_url + "/predict", data=request_data)
predictions = json.loads(response.text)['predictions']
processed = predictions[0].lstrip("{value=").rstrip("}")
else:
try:
json.loads(request_data)
input_dict = json_to_ndarray_dict(request_data)
except Exception as e:
if isinstance(request_data, dict):
input_dict = request_data
else:
input_dict = {'t': request_data}
uri = str(uuid.uuid4())
self.enqueue(uri, **input_dict)
processed = "[]"
time_sleep = 0.001
while time_sleep < timeout:
processed = self.output_queue.query_and_delete(uri)
if processed != "[]":
break
time.sleep(time_sleep)
time_sleep += 0.001
return processed
def enqueue(self, uri, **data):
b64str = self.data_to_b64(**data)
d = {"uri": uri, "data": b64str}
self.__enqueue_data(d)
def data_to_b64(self, **data):
sink = pa.BufferOutputStream()
field_list = []
data_list = []
for key, value in data.items():
field, data = get_field_and_data(key, value)
field_list.append(field)
data_list.append(data)
schema = pa.schema(field_list)
batch = pa.RecordBatch.from_arrays(
data_list, schema)
writer = pa.RecordBatchStreamWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
buf = sink.getvalue()
b = buf.to_pybytes()
b64str = self.base64_encode_image(b)
return b64str
def enqueue_tensor(self, uri, data):
"""
deprecated
"""
if isinstance(data, np.ndarray):
# tensor
data = [data]
if not isinstance(data, list):
invalidInputError(False,
"Your input is invalid, only List of ndarray and ndarray"
" are allowed.")
sink = pa.BufferOutputStream()
writer = None
for d in data:
shape = np.array(d.shape)
d = d.astype("float32").flatten()
data_field = pa.field("data", pa.list_(pa.float32()))
shape_field = pa.field("shape", pa.list_(pa.int64()))
tensor_type = pa.struct([data_field, shape_field])
tensor = pa.array([{'data': d}, {'shape': shape}],
type=tensor_type)
tensor_field = pa.field(uri, tensor_type)
schema = pa.schema([tensor_field])
batch = pa.RecordBatch.from_arrays(
[tensor], schema)
if writer is None:
# initialize
writer = pa.RecordBatchFileWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
buf = sink.getvalue()
b = buf.to_pybytes()
tensor_encoded = self.base64_encode_image(b)
d = {"uri": uri, "data": tensor_encoded}
self.__enqueue_data(d)
def __enqueue_data(self, data):
inf = self.db.info()
try:
if inf['used_memory'] >= inf['maxmemory'] * self.input_threshold\
and inf['maxmemory'] != 0:
invalidInputError(False, "redis connetion error")
self.db.xadd(self.name, data)
print("Write to Redis successful")
except redis.exceptions.ConnectionError:
print("Redis queue is full, please wait for inference "
"or delete the unprocessed records.")
time.sleep(self.interval_if_error)
except redis.exceptions.ResponseError as e:
print(e, "Please check if Redis version > 5, "
"if yes, memory may be full, try dequeue or delete.")
time.sleep(self.interval_if_error)
@staticmethod
def base64_encode_image(img):
# base64 encode the input NumPy array
return base64.b64encode(img).decode("utf-8")
class OutputQueue(API):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def dequeue(self):
res_list = self.db.keys(RESULT_PREFIX + self.name + ':*')
decoded = {}
for res in res_list:
res_dict = self.db.hgetall(res.decode('utf-8'))
res_id = res.decode('utf-8').split(":")[1]
res_value = res_dict[b'value'].decode('utf-8')
if res_value == "NaN":
decoded[res_id] = "NaN"
else:
decoded[res_id] = self.get_ndarray_from_b64(res_value)
self.db.delete(res)
return decoded
def query_and_delete(self, uri):
return self.query(uri, True)
def query(self, uri, delete=False):
res_dict = self.db.hgetall(RESULT_PREFIX + self.name + ':' + uri)
if not res_dict or len(res_dict) == 0:
return "[]"
if delete:
self.db.delete(RESULT_PREFIX + self.name + ':' + uri)
s = res_dict[b'value'].decode('utf-8')
if s == "NaN":
return s
return self.get_ndarray_from_b64(s)
def get_ndarray_from_b64(self, b64str):
b = base64.b64decode(b64str)
a = pa.BufferReader(b)
c = a.read_buffer()
myreader = pa.ipc.open_stream(c)
r = [i for i in myreader]
invalidInputError(len(r) > 0, f"len(r) should be positive, but got ${len(r)}")
if len(r) == 1:
return self.get_ndarray_from_record_batch(r[0])
else:
l = []
for ele in r:
l.append(self.get_ndarray_from_record_batch(ele))
return l
def get_ndarray_from_record_batch(self, record_batch):
data = record_batch[0].to_numpy()
shape_list = record_batch[1].to_pylist()
shape = [i for i in shape_list if i]
ndarray = data.reshape(shape)
return ndarray
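# Usage sketch (host/port and the input tensor are illustrative):
#
#   input_api = InputQueue(host="localhost", port="6379")
#   result = input_api.predict(np.ones([3, 224, 224]), timeout=10)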
| {
"content_hash": "4bdbe511f8efdfc13609977cc9a1ebe0",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 98,
"avg_line_length": 34.10181818181818,
"alnum_prop": 0.5468116869268501,
"repo_name": "yangw1234/BigDL",
"id": "03d2a980db9f20c6e5f0ece822d2c9e7f8c61117",
"size": "9965",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/serving/src/bigdl/serving/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "138760"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54063856"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8762180"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216038"
},
{
"name": "Shell",
"bytes": "844916"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import os
import shutil
import tarfile
from pants.util.contextutil import open_tar
from pants.util.dirutil import safe_mkdir, safe_mkdir_for, safe_walk
class ArtifactError(Exception):
pass
class Artifact(object):
"""Represents a set of files in an artifact."""
def __init__(self, artifact_root):
# All files must be under this root.
self._artifact_root = artifact_root
# The files known to be in this artifact, relative to artifact_root.
self._relpaths = set()
def exists(self):
""":returns True if the artifact is available for extraction."""
raise NotImplementedError()
def get_paths(self):
for relpath in self._relpaths:
yield os.path.join(self._artifact_root, relpath)
def override_paths(self, paths): # Use with care.
self._relpaths = set([os.path.relpath(path, self._artifact_root) for path in paths])
def collect(self, paths):
"""Collect the paths (which must be under artifact root) into this artifact."""
raise NotImplementedError()
def extract(self):
"""Extract the files in this artifact to their locations under artifact root."""
raise NotImplementedError()
class DirectoryArtifact(Artifact):
"""An artifact stored as loose files under a directory."""
def __init__(self, artifact_root, directory):
super(DirectoryArtifact, self).__init__(artifact_root)
self._directory = directory
def exists(self):
return os.path.exists(self._directory)
def collect(self, paths):
for path in paths or ():
relpath = os.path.relpath(path, self._artifact_root)
dst = os.path.join(self._directory, relpath)
safe_mkdir(os.path.dirname(dst))
if os.path.isdir(path):
shutil.copytree(path, dst)
else:
shutil.copy(path, dst)
self._relpaths.add(relpath)
def extract(self):
for dir_name, _, filenames in safe_walk(self._directory):
for filename in filenames:
filename = os.path.join(dir_name, filename)
relpath = os.path.relpath(filename, self._directory)
dst = os.path.join(self._artifact_root, relpath)
safe_mkdir_for(dst)
shutil.copy(filename, dst)
self._relpaths.add(relpath)
class TarballArtifact(Artifact):
"""An artifact stored in a tarball."""
def __init__(self, artifact_root, tarfile_, compression=9):
super(TarballArtifact, self).__init__(artifact_root)
self._tarfile = tarfile_
self._compression = compression
def exists(self):
return os.path.isfile(self._tarfile)
def collect(self, paths):
# In our tests, gzip is slightly less compressive than bzip2 on .class files,
# but decompression times are much faster.
mode = 'w:gz'
tar_kwargs = {'dereference': True, 'errorlevel': 2, 'compresslevel': self._compression}
with open_tar(self._tarfile, mode, **tar_kwargs) as tarout:
for path in paths or ():
# Adds dirs recursively.
relpath = os.path.relpath(path, self._artifact_root)
tarout.add(path, relpath)
self._relpaths.add(relpath)
def extract(self):
try:
with open_tar(self._tarfile, 'r', errorlevel=2) as tarin:
# Note: We create all needed paths proactively, even though extractall() can do this for us.
# This is because we may be called concurrently on multiple artifacts that share directories,
# and there will be a race condition inside extractall(): task T1 A) sees that a directory
# doesn't exist and B) tries to create it. But in the gap between A) and B) task T2 creates
# the same directory, so T1 throws "File exists" in B).
# This actually happened, and was very hard to debug.
# Creating the paths here up front allows us to squelch that "File exists" error.
paths = []
dirs = set()
for tarinfo in tarin.getmembers():
paths.append(tarinfo.name)
if tarinfo.isdir():
dirs.add(tarinfo.name)
else:
dirs.add(os.path.dirname(tarinfo.name))
for d in dirs:
try:
os.makedirs(os.path.join(self._artifact_root, d))
except OSError as e:
if e.errno != errno.EEXIST:
raise
tarin.extractall(self._artifact_root)
self._relpaths.update(paths)
except tarfile.ReadError as e:
raise ArtifactError(str(e))
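# Usage sketch (paths are illustrative): collect files under the artifact
# root into a tarball, then restore them elsewhere with extract():
#
#   artifact = TarballArtifact('/tmp/root', '/tmp/cache/key.tgz')
#   artifact.collect(['/tmp/root/classes'])
#   artifact.extract()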
| {
"content_hash": "2f49e895917e547be95a9a4acf0c15e4",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 101,
"avg_line_length": 34.48091603053435,
"alnum_prop": 0.6572946646003985,
"repo_name": "manasapte/pants",
"id": "53b5906b9bd2391830d2bf5e68b7caf45bd64fc8",
"size": "4664",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/python/pants/cache/artifact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "438730"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "5084384"
},
{
"name": "Scala",
"bytes": "84585"
},
{
"name": "Shell",
"bytes": "58748"
},
{
"name": "Thrift",
"bytes": "1966"
}
],
"symlink_target": ""
} |
import urllib2
import re
import dao
import time
from datetime import datetime
import threading,thread
from task_model import Task
import sys,os
# Condition variable
cv = threading.Condition()
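# http_crawler: type 0 fetches an index page and returns the parsed list of
# article links; any other type returns the raw page content for saving.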
def http_crawler(url,type):
content = http_request(url)
if type == 0:
page = parse_page_list(content)
else:
page = content
return page
def http_request(url):
#Request source file
request = urllib2.Request(url)
response = urllib2.urlopen(request)
page = response.read()
return page
def parse_page_list(page):
pattern = re.compile('<a href="http://yue.ifeng.com/news/detail_(.*?)" target="_blank">(.*?)</a>',re.S)
items = re.findall(pattern,page)
#for item in items:
# print item[0],item[1]
return items
def save_page(page,file_name):
#Save source file
project_path = os.getcwd()
ymd = datetime.now().strftime('%Y%m%d')
dir_path = project_path + "/pages/" +ymd +"/"
if not os.path.exists(dir_path):
os.makedirs(dir_path)
webFile = open(dir_path+file_name,'wb')
webFile.write(page)
webFile.close()
def now():
cur = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
return cur
def cond_wait(cv):
cv.wait()
def cond_signal(cond):
cond.acquire()
cond.notify()
cond.release()
def build_task(url):
task = Task(id=None,priority=0,type=1,state=0,link=url,avaliable_time=now(),start_time=None,end_time=None)
return task
def run():
while True:
print "开始处理任务"
task = dao.select(state=0)
cv.acquire()
        while task == None:
            cond_wait(cv)
            task = dao.select(state=0)
        cv.release()
ret = dao.update(state=1, update_time=now(), id=task.id)
if ret == 0:
print "任务已经被处理,直接跳出循环"
continue
page = http_crawler(task.link,task.type)
if task.type == 0:
print "处理列表任务...."
for item in page:
prefix = "http://yue.ifeng.com/news/detail_"
link = prefix + item[0]
new_task = build_task(link)
dao.insert(new_task)
cond_signal(cv)
dao.update(state=2, update_time=now(), id=task.id)
if task.type == 1:
file_name = task.link.split("/")[-1]
print "保存页面....",task.link,file_name
save_page(page,file_name)
ret = dao.update(state=2, update_time=now(), id=task.id)
print "任务完成"
if __name__ == '__main__':
if len(sys.argv) < 2:
print "请输入正确的命令"
print "eg: python list_crawler.py 5"
sys.exit()
    # sys.argv[1] is always a string here (never None), so parse it directly.
    num = int(sys.argv[1])
    print "Starting " + str(num) + " worker threads"
    for i in range(num):
        thread.start_new_thread(run, ())  # pass the function, don't call it
    while True:
        time.sleep(60)  # keep the main thread alive for the workers
| {
"content_hash": "2e69820dc792d0fdeee512a04facac48",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 110,
"avg_line_length": 26.38095238095238,
"alnum_prop": 0.5646209386281589,
"repo_name": "xpao24/Crawler",
"id": "6a87ac3e5d6e693dab8431bbadd5be5f1da43025",
"size": "2918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "list_crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9660"
}
],
"symlink_target": ""
} |
import copy
import inspect
from django.db import router
from django.db.models.query import QuerySet
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
def ensure_default_manager(sender, **kwargs):
"""
Ensures that a Model subclass contains a default manager and sets the
    _default_manager attribute on the class. Also sets _base_manager to
    point to a plain Manager instance (which could be the same as
_default_manager if it's not a subclass of Manager).
"""
cls = sender
if cls._meta.abstract:
setattr(cls, 'objects', AbstractManagerDescriptor(cls))
return
elif cls._meta.swapped:
setattr(cls, 'objects', SwappedManagerDescriptor(cls))
return
if not getattr(cls, '_default_manager', None):
# Create the default manager, if needed.
try:
cls._meta.get_field('objects')
raise ValueError("Model %s must specify a custom Manager, because it has a field named 'objects'" % cls.__name__)
except FieldDoesNotExist:
pass
cls.add_to_class('objects', Manager())
cls._base_manager = cls.objects
elif not getattr(cls, '_base_manager', None):
default_mgr = cls._default_manager.__class__
if (default_mgr is Manager or
getattr(default_mgr, "use_for_related_fields", False)):
cls._base_manager = cls._default_manager
else:
# Default manager isn't a plain Manager class, or a suitable
# replacement, so we walk up the base class hierarchy until we hit
# something appropriate.
for base_class in default_mgr.mro()[1:]:
if (base_class is Manager or
getattr(base_class, "use_for_related_fields", False)):
cls.add_to_class('_base_manager', base_class())
return
raise AssertionError("Should never get here. Please report a bug, including your model and model manager setup.")
signals.class_prepared.connect(ensure_default_manager)
class RenameManagerMethods(RenameMethodsBase):
renamed_methods = (
('get_query_set', 'get_queryset', DeprecationWarning),
('get_prefetch_query_set', 'get_prefetch_queryset', DeprecationWarning),
)
class BaseManager(six.with_metaclass(RenameManagerMethods)):
# Tracks each time a Manager instance is created. Used to retain order.
creation_counter = 0
def __init__(self):
super(BaseManager, self).__init__()
self._set_creation_counter()
self.model = None
self._inherited = False
self._db = None
@classmethod
def _get_queryset_methods(cls, queryset_class):
def create_method(name, method):
def manager_method(self, *args, **kwargs):
return getattr(self.get_queryset(), name)(*args, **kwargs)
manager_method.__name__ = method.__name__
manager_method.__doc__ = method.__doc__
return manager_method
new_methods = {}
# Refs http://bugs.python.org/issue1785.
predicate = inspect.isfunction if six.PY3 else inspect.ismethod
for name, method in inspect.getmembers(queryset_class, predicate=predicate):
# Only copy missing methods.
if hasattr(cls, name):
continue
# Only copy public methods or methods with the attribute `queryset_only=False`.
queryset_only = getattr(method, 'queryset_only', None)
if queryset_only or (queryset_only is None and name.startswith('_')):
continue
# Copy the method onto the manager.
new_methods[name] = create_method(name, method)
return new_methods
@classmethod
def from_queryset(cls, queryset_class, class_name=None):
if class_name is None:
class_name = '%sFrom%s' % (cls.__name__, queryset_class.__name__)
class_dict = {
'_queryset_class': queryset_class,
}
class_dict.update(cls._get_queryset_methods(queryset_class))
return type(class_name, (cls,), class_dict)
def contribute_to_class(self, model, name):
# TODO: Use weakref because of possible memory leak / circular reference.
self.model = model
# Only contribute the manager if the model is concrete
if model._meta.abstract:
setattr(model, name, AbstractManagerDescriptor(model))
elif model._meta.swapped:
setattr(model, name, SwappedManagerDescriptor(model))
else:
# if not model._meta.abstract and not model._meta.swapped:
setattr(model, name, ManagerDescriptor(self))
if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
model._default_manager = self
if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
model._meta.abstract_managers.append((self.creation_counter, name,
self))
else:
model._meta.concrete_managers.append((self.creation_counter, name,
self))
def _set_creation_counter(self):
"""
Sets the creation counter value for this instance and increments the
class-level copy.
"""
self.creation_counter = BaseManager.creation_counter
BaseManager.creation_counter += 1
def _copy_to_model(self, model):
"""
Makes a copy of the manager and assigns it to 'model', which should be
a child of the existing model (used when inheriting a manager from an
abstract base class).
"""
assert issubclass(model, self.model)
mgr = copy.copy(self)
mgr._set_creation_counter()
mgr.model = model
mgr._inherited = True
return mgr
def db_manager(self, using):
obj = copy.copy(self)
obj._db = using
return obj
@property
def db(self):
return self._db or router.db_for_read(self.model)
def get_queryset(self):
"""
Returns a new QuerySet object. Subclasses can override this method to
easily customize the behavior of the Manager.
"""
return self._queryset_class(self.model, using=self._db)
def all(self):
# We can't proxy this method through the `QuerySet` like we do for the
# rest of the `QuerySet` methods. This is because `QuerySet.all()`
# works by creating a "copy" of the current queryset and in making said
# copy, all the cached `prefetch_related` lookups are lost. See the
# implementation of `RelatedManager.get_queryset()` for a better
# understanding of how this comes into play.
return self.get_queryset()
Manager = BaseManager.from_queryset(QuerySet, class_name='Manager')
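# Usage sketch for from_queryset(); PersonQuerySet and Person below are
# hypothetical, not part of this module:
#
#     class PersonQuerySet(QuerySet):
#         def adults(self):
#             return self.filter(age__gte=18)
#
#     class Person(models.Model):
#         age = models.IntegerField()
#         objects = Manager.from_queryset(PersonQuerySet)()
#
# Person.objects.adults() then proxies to PersonQuerySet.adults() through
# the methods copied by _get_queryset_methods().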
class ManagerDescriptor(object):
# This class ensures managers aren't accessible via model instances.
# For example, Poll.objects works, but poll_obj.objects raises AttributeError.
def __init__(self, manager):
self.manager = manager
def __get__(self, instance, type=None):
if instance is not None:
raise AttributeError("Manager isn't accessible via %s instances" % type.__name__)
return self.manager
class AbstractManagerDescriptor(object):
# This class provides a better error message when you try to access a
# manager on an abstract model.
def __init__(self, model):
self.model = model
def __get__(self, instance, type=None):
raise AttributeError("Manager isn't available; %s is abstract" % (
self.model._meta.object_name,
))
class SwappedManagerDescriptor(object):
# This class provides a better error message when you try to access a
# manager on a swapped model.
def __init__(self, model):
self.model = model
def __get__(self, instance, type=None):
raise AttributeError("Manager isn't available; %s has been swapped for '%s'" % (
self.model._meta.object_name, self.model._meta.swapped
))
class EmptyManager(Manager):
def __init__(self, model):
super(EmptyManager, self).__init__()
self.model = model
def get_queryset(self):
return super(EmptyManager, self).get_queryset().none()
| {
"content_hash": "6b7e8399d92b6bd9d266f15a4af4251b",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 125,
"avg_line_length": 39.14746543778802,
"alnum_prop": 0.6309593878752208,
"repo_name": "ZhaoCJ/django",
"id": "4f16b5ebfebfa818da401920112398a872b03dce",
"size": "8495",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/db/models/manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
__author__ = 'farooq.sheikh'
from setuptools import setup, find_packages
setup(
name = 'asposeemailcloud',
packages = find_packages(),
version = '1.0.1',
description = 'Aspose.Email Cloud SDK for Python allows you to use Aspose.Email APIs in your Python applications',
author='Farooq Sheikh',
author_email='farooq.sheikh@aspose.com',
url='http://www.aspose.com/cloud/email-api.aspx',
install_requires=[
'asposestoragecloud',
],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent'
]
)
| {
"content_hash": "9e1e91d10df7bb31ad6013b65d7024c0",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 118,
"avg_line_length": 31.272727272727273,
"alnum_prop": 0.6380813953488372,
"repo_name": "asposeemail/Aspose_Email_Cloud",
"id": "faf3107b277c46265d324ad2654b0856ec4ce9da",
"size": "688",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SDKs/Aspose.Email-Cloud-SDK-for-Python/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "C#",
"bytes": "121104"
},
{
"name": "Java",
"bytes": "46866"
},
{
"name": "JavaScript",
"bytes": "79666"
},
{
"name": "Objective-C",
"bytes": "62346"
},
{
"name": "PHP",
"bytes": "38146"
},
{
"name": "Python",
"bytes": "49748"
},
{
"name": "Ruby",
"bytes": "52788"
}
],
"symlink_target": ""
} |
'''
Luhn modulo 10
~~~~~~~~~~~~~~
Used e.g. in credit card numbers and in the Finnish BBAN.
'''
from ..checksum import (
ChecksumStrategy,
)
class Luhn10(ChecksumStrategy):
'''
Provides Luhn modulo 10 checksum algorithm.
'''
name = 'luhn'
def checksum(self, body):
digits = self._prepare(body)
odds = digits[-1::-2]
evens = digits[-2::-2]
checksum = 0
checksum += sum(evens)
for digit in odds:
checksum += sum(self._prepare(digit * 2))
        # Take the result modulo 10 so a total ending in 0 yields check
        # digit "0" rather than "10".
        checksum = (10 - checksum % 10) % 10
        return str(checksum)
def split(self, value):
return (value[:-1], value[-1])
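# A quick sanity check, assuming the ChecksumStrategy base class needs no
# constructor arguments and that _prepare() turns its input into a list of
# digit ints:
#
#     >>> Luhn10().checksum('7992739871')
#     '3'
#
# which reproduces the check digit of the classic Luhn test number
# 79927398713.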
| {
"content_hash": "81c6db999416cfdbc7073bd1ddba277f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 53,
"avg_line_length": 19,
"alnum_prop": 0.5503759398496241,
"repo_name": "vaiski/checksum",
"id": "bbb49cc33c15152e7edc17ed311cbe9ca85a4492",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/checksum/strategies/luhn10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22963"
}
],
"symlink_target": ""
} |
from thrift.transport import TTransport
class TBase(object):
__slots__ = ()
def __repr__(self):
L = ['%s=%r' % (key, getattr(self, key)) for key in self.__slots__]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for attr in self.__slots__:
my_val = getattr(self, attr)
other_val = getattr(other, attr)
if my_val != other_val:
return False
return True
def __ne__(self, other):
return not (self == other)
def read(self, iprot):
if (iprot._fast_decode is not None and
isinstance(iprot.trans, TTransport.CReadableTransport) and
self.thrift_spec is not None):
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
else:
iprot.readStruct(self, self.thrift_spec)
def write(self, oprot):
if (oprot._fast_encode is not None and self.thrift_spec is not None):
oprot.trans.write(
oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
else:
oprot.writeStruct(self, self.thrift_spec)
class TExceptionBase(TBase, Exception):
pass
class TFrozenBase(TBase):
def __setitem__(self, *args):
raise TypeError("Can't modify frozen struct")
def __delitem__(self, *args):
raise TypeError("Can't modify frozen struct")
def __hash__(self, *args):
return hash(self.__class__) ^ hash(self.__slots__)
@classmethod
def read(cls, iprot):
if (iprot._fast_decode is not None and
isinstance(iprot.trans, TTransport.CReadableTransport) and
cls.thrift_spec is not None):
self = cls()
return iprot._fast_decode(None, iprot,
[self.__class__, self.thrift_spec])
else:
return iprot.readStruct(cls, cls.thrift_spec, True)
class TFrozenExceptionBase(TFrozenBase, TExceptionBase):
pass
| {
"content_hash": "be9a46f4cd759f5e2fcb3605d19b9efd",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 31.47761194029851,
"alnum_prop": 0.5599810336652442,
"repo_name": "yongju-hong/thrift",
"id": "6c6ef18e877619b4cf5946eac3b898fba5a0c2dd",
"size": "2895",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "lib/py/src/protocol/TBase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "51247"
},
{
"name": "C",
"bytes": "1068872"
},
{
"name": "C#",
"bytes": "527324"
},
{
"name": "C++",
"bytes": "4657822"
},
{
"name": "CMake",
"bytes": "129223"
},
{
"name": "CSS",
"bytes": "1070"
},
{
"name": "Common Lisp",
"bytes": "39679"
},
{
"name": "D",
"bytes": "662065"
},
{
"name": "Dart",
"bytes": "181474"
},
{
"name": "Dockerfile",
"bytes": "64523"
},
{
"name": "Emacs Lisp",
"bytes": "5361"
},
{
"name": "Erlang",
"bytes": "322899"
},
{
"name": "Go",
"bytes": "666693"
},
{
"name": "HTML",
"bytes": "36484"
},
{
"name": "Haxe",
"bytes": "323112"
},
{
"name": "Java",
"bytes": "1159310"
},
{
"name": "JavaScript",
"bytes": "445851"
},
{
"name": "Lex",
"bytes": "10804"
},
{
"name": "Lua",
"bytes": "81630"
},
{
"name": "M4",
"bytes": "171969"
},
{
"name": "Makefile",
"bytes": "217564"
},
{
"name": "OCaml",
"bytes": "39269"
},
{
"name": "PHP",
"bytes": "355023"
},
{
"name": "Pascal",
"bytes": "592933"
},
{
"name": "Perl",
"bytes": "132968"
},
{
"name": "Python",
"bytes": "503548"
},
{
"name": "Ruby",
"bytes": "400013"
},
{
"name": "Rust",
"bytes": "355663"
},
{
"name": "Shell",
"bytes": "59340"
},
{
"name": "Smalltalk",
"bytes": "22944"
},
{
"name": "Swift",
"bytes": "165395"
},
{
"name": "Thrift",
"bytes": "407557"
},
{
"name": "TypeScript",
"bytes": "61760"
},
{
"name": "Vim Script",
"bytes": "2846"
},
{
"name": "Yacc",
"bytes": "27511"
}
],
"symlink_target": ""
} |
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
BASE_STYLE = """<style>
@charset "utf-8";
@import \"//fonts.googleapis.com/css?family=Open+Sans\";
html {
height: 100%;
}
body {
color: #2d2d2d;
font-family: -apple-system, \"BlinkMacSystemFont\", \"Segoe UI\", \"Roboto\", \"Open Sans\", \"Helvetica\", \"Arial\", sans-serif;
font-size: 13px;
line-height: 18px;
shape-rendering: crispEdges;
}
iframe {
border-width: 0px;
height: 100%;
left: 0px;
position: absolute;
top: 0px;
width: 100%;
}
h1 {
font-size: 22px;
font-weight: 600;
line-height: 26px;
margin: 14px 0px 14px 0px;
}
a {
color: #4769cc;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
hr {
margin: 3px 0px 3px 0px;
}
table {
font-size: 13px;
line-height: 18px;
max-width: 760px;
table-layout: fixed;
word-break: break-all;
}
table th {
font-weight: 600;
}
table th > *, table td > * {
vertical-align: middle;
}
table th > a {
color: #2d2d2d;
}
table th > a.selected {
font-weight: bold;
text-decoration: underline;
}
table td > svg {
color: #4d4d4d;
fill: currentColor;
margin-right: 6px;
}
.traceback {
line-height: 26px;
margin: 8px 0px 8px 0px;
}
@media screen and (max-width: 760px) {
table th, table td {
display: none;
}
table td:nth-child(1) {
display: initial;
}
}
</style>"""
| {
"content_hash": "5a4a150fa03ed65cda1ed7e1fc0c0f32",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 134,
"avg_line_length": 24.142857142857142,
"alnum_prop": 0.6168639053254438,
"repo_name": "hivesolutions/netius",
"id": "4b49ad53faaf504d0fb67af67132c85cd1d3f9c4",
"size": "2750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/netius/common/style.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1400497"
}
],
"symlink_target": ""
} |
"""Module: Mosfet
This module implements a MosfetTemplate class for creating MOS
transistor PyCells.
MosfetTemplate provides the following capabilities:
- (float ) transistor width
- (float ) transistor length
- (integer) fingers, number of transistors
- (boolean) left diffusion contact
- (float ) left diffusion contact coverage
- (boolean) left transistor gate contact
- (float ) left transistor gate contact coverage
- (boolean) center diffusion contacts
- (float ) center diffusion contact coverage
- (boolean) center transistor gates contact
- (float ) center transistor gates contact coverage
- (boolean) right diffusion contact
- (float ) right diffusion contact coverage
- (boolean) right transistor gate contact
- (float ) right transistor gate contact coverage
- Stretch handles for contacts
- Stretch handles for gate w & l
- Auto-abutment
- Electrical connectivity, i.e. nets, pins, terminals.
Class variables:
- (string ) poly, Layer name
- (string ) diffusion, Layer name
- (string ) well, Layer name
- (string ) implant, Layer name
- (string ) contact, Layer name
- (string ) metal1, Layer name
Technology file requirements:
- (minEnclosure poly diffusion)
- (minEnclosure diffusion poly )
- (minSpacing contact poly )
- (minSpacing poly )
- (minWidth contact )
Additional requirements exist in Via module.
Module dependencies:
- cni.dlo, CiraNova PyCell APIs.
- Via, Contact PyCells
Exceptions:
- ValueError, for missing DRC rules in technology file.
EDA tool integration:
Stretch handles are specific features of layout editors.
Standard OpenAccess semantics do not exist. To support
stretch handles, we define a standard protocol, and create
customized interpreters for each layout editor. This
enables us to support stretch handles in multiple layout
editors without changes to the Python API or the PyCell
implementation.
Other notes:
[1] Dogbone configurations aren't implemented in this code.
For current processes, 90nm and below, the transistor
endcap to L-shaped source/drain diffusion spacing is
typically bigger. This type of conditional rule is
better modeled in upcoming API functions; hence, we
defer the implementation.
[2] Only creates pins for leftmost diffusion, rightmost diffusion,
and leftmost gate. Unclear what to do about the center gates
and diffusions, since this could be either a series or a
parallel structure.
"""
__revision__ = "$Id: Mosfet_vtg.py 151 2009-11-02 17:30:59Z wdavis@EOS.NCSU.EDU $"
__author__ = "Lyndon C. Lim"
from cni.dlo import (
Box,
Direction,
DloGen,
FailAction,
Grouping,
Instance,
Layer,
Location,
ParamArray,
ParamSpecArray,
Pin,
Point,
RangeConstraint,
Rect,
Term,
TermType,
Text,
)
from cni.integ.common import (
stretchHandle,
autoAbutment,
)
import traceback
from Via import (
ViaInstance,
)
class Dictionary:
pass
#### Layer rules in Santana.tech must be kept up-to-date for this to run correctly!
class MosfetTemplate( DloGen):
"""Defines a MosfetTemplate class.
"""
poly = "poly"
diffusion = "active"
well = "nwell or pwell"
implant = "pimplant"
contact = "contact"
metal1 = "metal1"
@classmethod
def defineParamSpecs(cls, specs):
"""Define the PyCell parameters. The order of invocation of
specs() becomes the order on the form.
Arguments:
specs - (ParamSpecArray) PyCell parameters
"""
oxide = "thin"
tranType = {"pimplant":"pmos_vtg", "nimplant":"nmos_vtg"}[cls.implant]
l = specs.tech.getMosfetParams( tranType, oxide, "minLength")
# No dogbone allowed.
w = specs.tech.getPhysicalRule( "minWidth", specs.tech.getLayer(cls.contact)) + \
2.0 * specs.tech.getPhysicalRule( "minEnclosure", specs.tech.getLayer(cls.diffusion), specs.tech.getLayer(cls.contact))
w = max( w, specs.tech.getMosfetParams( tranType, oxide, "minWidth"))
specs( "w", w, constraint = RangeConstraint( w, 1000*w, FailAction.USE_DEFAULT))
specs( "l", l, constraint = RangeConstraint( l, 1000*l, FailAction.USE_DEFAULT))
specs( "fingers", 1),
parameters = (
("diffContactLeft", True ),
("diffContactLeftCov", 1.0 ),
("gateContactLeft", False ),
("gateContactLeftCov", 1.0 ),
("diffContactCenter", False ),
("diffContactCenterCov", 1.0 ),
("gateContactCenter", False ),
("gateContactCenterCov", 1.0 ),
("diffContactRight", True ),
("diffContactRightCov", 1.0 ),
("gateContactRight", False ),
("gateContactRightCov", 1.0 ),
)
rangeConstraint = RangeConstraint(0.0, 1.0, FailAction.USE_DEFAULT)
for parameter in parameters:
if isinstance( parameter[1], float):
specs( parameter[0], parameter[1], constraint=rangeConstraint)
else:
specs( parameter[0], parameter[1])
def setupParams( self, params):
"""Process PyCell parameters, prior to geometric construction.
Decisions about process rules and PyCell-specific behaviors
should be confined to this method.
Create most useful format for variables to be used in later
methods.
Arguments:
params - (ParamArray) PyCell parameters
"""
for key in params:
self.__dict__[key] = params[ key]
for key in (
"diffContactLeftCov",
"gateContactLeftCov",
"diffContactCenterCov",
"gateContactCenterCov",
"diffContactRightCov",
"gateContactRightCov" ):
# Contact coverage parameters are 0.0 - 1.0
self.__dict__[key] = min( max( self.__dict__[key], 0), 1.0)
# Convert to process layer names
if self.implant == "pimplant":
self.encLayers = [ self.tech.getLayer( "nwell")]
self.well = self.tech.getLayer( "nwell")
else:
self.encLayers = [ self.tech.getLayer( "pwell")]
self.well = self.tech.getLayer( "pwell")
self.alt = self.tech.getLayer( "vtg")
self.poly = self.tech.getLayer( self.poly )
self.diffusion = self.tech.getLayer( self.diffusion )
self.implant = self.tech.getLayer( self.implant )
self.contact = self.tech.getLayer( self.contact )
self.metal1 = self.tech.getLayer( self.metal1 )
# Implant not an enclosing layer in our kit
# self.encLayers.append( self.implant)
self.instance = 0 # counter for instance names
# Get process design rule information
self.Endcap = self.tech.getPhysicalRule( "minEnclosure", self.poly, self.diffusion)
self.ContSpacePoly = self.tech.getPhysicalRule( "minSpacing", self.contact, self.poly)
self.DiffSpace = self.tech.getPhysicalRule( "minSpacing", self.diffusion)
self.GateSpace = self.tech.getPhysicalRule( "minSpacing", self.poly)
self.ContWidth = self.tech.getPhysicalRule( "minWidth", self.contact)
self.grid = self.tech.getGridResolution()
self.gridX2 = self.grid * 2.0
self.gridd2 = self.grid / 2.0
self.w = round(self.w / self.gridX2) * self.gridX2
self.l = round(self.l / self.gridX2) * self.gridX2
self.lDiv2 = self.l / 2.0
self.GatePitch = self.GateSpace + self.l
self.GatePitchDiv2 = (self.GateSpace + self.l) / 2.0
self.GateSpaceDiv2 = self.GateSpace / 2.0
self.ContGatePitch = self.ContSpacePoly + self.lDiv2 + (self.ContWidth / 2.0)
def genTopology( self):
"""Define topology (connectivity) for multi-device circuit PyCells.
"""
pass
def sizeDevices( self):
"""Define device sizes within multi-device circuit PyCells.
"""
pass
def createGate( self,
x=0,
y=0,
terminal=False):
"""Create the poly rectangle which represents the MOS
transistor gate.
Override this method to create custom gates.
Arguments:
x - (integer) x coordinate of gate center
y - (integer) y coordinate of lower diffusion edge
"""
left = x - self.lDiv2
right = x + self.lDiv2
# Create transistor gate
gateRect = Rect( self.poly,
Box( left, (y - self.Endcap),
right, (y + self.w + self.Endcap),
)
)
# Stretch handles for w & l
stretchHandle(
shape = gateRect,
name = ("stretch%d" % self.instance),
parameter = "w",
location = Location.UPPER_CENTER,
direction = Direction.NORTH_SOUTH,
display = ("w = %.2f" % self.w),
stretchType = "relative",
userScale = "1.0",
userSnap = "0.0025",
)
stretchHandle(
shape = gateRect,
name = ("stretch%d" % self.instance),
parameter = "l",
location = Location.CENTER_RIGHT,
direction = Direction.EAST_WEST,
display = ("l = %.2f" % self.l),
stretchType = "relative",
userScale = "1.0",
userSnap = "0.0025",
)
# Create weakly-connected pins
if terminal:
# Bottom gate pin
Pin(
"%sS%d" % (terminal, self.instance),
terminal,
Rect(
self.poly,
Box( left, (y - self.Endcap),
right, y,
)
)
)
# Top gate pin
Pin(
"%sN%d" % (terminal, self.instance),
terminal,
Rect(
self.poly,
Box( left, (y + self.w),
right, (y + self.w + self.Endcap),
)
)
)
self.instance += 1
return( gateRect)
def createGateCont( self,
gateRect=False,
coverage=1.0,
stretch=False,
terminal=False):
"""Create a gate contact by instantiating a poly contact PyCell.
Arguments:
gateRect - (PhysicalComponent) Gate rectangle for alignment.
coverage - (float ) Percentage of poly width to be covered
by contact
stretch - (string ) Name of stretch handle property for
gate contact
"""
gateCont = ViaInstance(
"pcont",
ParamArray(),
None,
"I%d" % self.instance,
)
self.place( gateCont, Direction.SOUTH, gateRect, 0)
width = self.l * coverage
gateCont.resize(
width = width,
via = self.contact,
metalLayer = self.poly,
)
# Create overlapping poly rectangle for stretch handle
polyRect = gateCont.promoteMetal( self.poly)
bbox = polyRect.getBBox()
width = max( width, bbox.getWidth()) / 2
center = bbox.getCenterX()
bbox.setLeft( center - width)
bbox.setRight( center + width)
polyRect.setBBox( bbox)
# Stretch handle for gate contact coverage
stretchHandle(
shape = polyRect,
name = ("stretch%d" % self.instance),
parameter = stretch,
location = Location.CENTER_RIGHT,
direction = Direction.EAST_WEST,
stretchType = "relative",
userScale = "1.0",
userSnap = "0.0025",
minVal = 0.0,
maxVal = 1.0,
)
# Create weakly-connected pins
if terminal:
Pin(
("%sC%d" % (terminal, self.instance)),
terminal,
Rect( self.poly, bbox)
)
self.instance += 1
return( gateCont)
def createSourceDrain( self,
diffusionType="full",
withContact=True,
x=0,
y=0,
coverage=1.0,
stretch=False,
terminal=False):
"""Create a source or drain diffusion.
Option to create diffusion contact instance.
Option to create matching diffusion terminal.
Option to create a stretch handle property.
Override this method to create custom contacts.
Arguments:
diffusionType - (string) "full", "left", "right"
withContact - (boolean) Create contact
x - (float ) x coordinate for center of contact
y - (float ) y coordinate for lower diffusion edge
coverage - (float ) Percentage of source/drain diffusion to
be covered by contact
stretch - (string ) Name of stretch handle property
"""
# Create source/drain contact
if withContact:
diffCont = ViaInstance(
"dcont",
ParamArray( origin="lowerCenter"),
None,
"I%d" % self.instance,
)
diffCont.setOrigin( Point(x, y-0.03))
height = self.w * coverage
diffCont.resize(
height = height,
via = self.contact,
metalLayer = self.diffusion,
)
# Create overlapping diffusion rectangle for stretch handle
diffRect = diffCont.promoteMetal( self.diffusion)
bbox = diffRect.getBBox()
height = max( height, bbox.getHeight())
bbox.setTop( bbox.getBottom() + height)
diffRect.setBBox( bbox)
# Stretch handle for diffusion contact coverage
stretchHandle(
shape = diffRect,
name = ("stretch%d" % self.instance),
parameter = stretch,
location = Location.UPPER_CENTER,
direction = Direction.NORTH_SOUTH,
stretchType = "relative",
userScale = "1.0",
userSnap = "0.0025",
minVal = 0.0,
maxVal = 1.0,
)
self.instance += 1
# Create source/drain diffusion
if withContact:
bbox = Box(
bbox.getLeft(), y,
bbox.getRight(), (y + self.w),
)
else:
if (diffusionType == "left"):
bbox = Box(
x, y,
(x + self.GateSpaceDiv2), (y + self.w),
)
elif (diffusionType == "right"):
bbox = Box(
(x - self.GateSpaceDiv2), y,
x, (y + self.w),
)
elif (diffusionType == "full"):
bbox = Box(
(x - self.GateSpaceDiv2), y,
(x + self.GateSpaceDiv2), (y + self.w),
)
else:
raise ValueError, "Unknown: diffusionType=%s" % diffusionType
if terminal:
p0 = Pin(
terminal,
terminal,
Rect( self.diffusion, bbox)
)
pinRect = p0.getShapes()[0]
autoAbutment(
pinRect,
self.w,
[ Direction.WEST],
"cniMOS",
abut2PinEqual = [ { "spacing":0.0}, { "diffLeftStyle":"DiffHalf" }, { "diffLeftStyle":"DiffHalf" } ],
abut2PinBigger = [ { "spacing":0.0}, { "diffLeftStyle":"DiffEdgeAbut" }, { "diffLeftStyle":"DiffEdgeAbut" } ],
abut3PinBigger = [ { "spacing":0.0}, { "diffLeftStyle":"ContactEdgeAbut2"}, { "diffLeftStyle":"ContactEdgeAbut2"} ],
abut3PinEqual = [ { "spacing":0.0}, { "diffLeftStyle":"DiffAbut" }, { "diffLeftStyle":"ContactEdgeAbut2"} ],
abut2PinSmaller = [ { "spacing":0.0}, { "diffLeftStyle":"DiffEdgeAbut" }, { "diffLeftStyle":"DiffEdgeAbut" } ],
abut3PinSmaller = [ { "spacing":0.0}, { "diffLeftStyle":"DiffEdgeAbut" }, { "diffLeftStyle":"DiffEdgeAbut" } ],
noAbut = [ { "spacing":0.4}],
function = "cniAbut",
#shape = pinRect,
#abutDirection = diffusionType,
#abutClass = "cniMOS",
#abutFunction = "cniAbut",
#spacingRule = self.DiffSpace,
)
else:
pinRect = Rect( self.diffusion, bbox)
return( pinRect)
def genLayout( self):
"""Main body of geometric construction code. Create the
leftmost contact and transistor gate. Loop to create center
contacts and gates. Create the rightmost gate and contact.
Avoid modifying or overriding this method. PyCell-specific
behaviors and calculations should be kept out of this method.
"""
# obj is used to track the rightmost object, to calculate
# the diffusion coordinates.
# dbox is the bounding box of the underlying diffusion.
dbox = Dictionary()
dbox.bottom = 0
dbox.top = self.w
origin = Dictionary()
xCoord = 0
origin.y = 0
objectPitch = {
True:self.ContGatePitch,
False:self.GatePitchDiv2,
}
# Mark PyCell as containing stretch handles
self.props["cniStretch"] = "CiraNova"
# For integration with layout editors, save parameter
# settings in the submaster. They are not saved on the
# instance in the default case.
# For auto-abutment
self.props["diffContactLeft"] = self.diffContactLeft
self.props["diffContactRight"] = self.diffContactRight
# For stretch handles
self.props["w"] = self.w
self.props["l"] = self.l
# Create electrical terminals needed for pins
Term("G", TermType.INPUT)
Term("S", TermType.INPUT_OUTPUT)
Term("D", TermType.INPUT_OUTPUT)
# Create leftmost diffusion contact
obj = self.createSourceDrain(
diffusionType = "left",
withContact = self.diffContactLeft,
coverage = self.diffContactLeftCov,
stretch = "diffContactLeftCov",
terminal = "S",
x = xCoord,
)
dbox.left = obj.getBBox( self.diffusion).getLeft()
# Create leftmost gate w/optional gate contact
xCoord += objectPitch[self.diffContactLeft]# + 0.0025
obj = self.createGate( x=xCoord, terminal="G")
origin.x = obj.getBBox().left
if self.gateContactLeft:
self.createGateCont(
gateRect = obj,
coverage = self.gateContactLeftCov,
stretch = "gateContactLeftCov",
terminal = "G",
)
# Loop to create center gates and contacts
for i in range( self.fingers - 2):
# Create diffusion contact on left of gate
xCoord += objectPitch[self.diffContactCenter] + 0.0025
self.createSourceDrain(
diffusionType = "full",
withContact = self.diffContactCenter,
coverage = self.diffContactCenterCov,
stretch = "diffContactCenterCov",
x = xCoord,
)
# Create gate w/optional gate contact
if self.diffContactCenter:
xCoord += objectPitch[self.diffContactCenter] + 0.0025
else:
xCoord += objectPitch[self.diffContactCenter] - 0.0025
obj = self.createGate( x=xCoord, terminal="G")
if self.gateContactCenter:
self.createGateCont(
gateRect = obj,
coverage = self.gateContactCenterCov,
stretch = "gateContactCenterCov",
terminal = "G",
)
# Create rightmost gate w/optional gate contact
if self.fingers > 1:
if self.diffContactCenter:
xCoord += objectPitch[self.diffContactCenter] + 0.0025
else:
xCoord += objectPitch[self.diffContactCenter] - 0.0025
self.createSourceDrain(
diffusionType = "full",
withContact = self.diffContactCenter,
coverage = self.diffContactCenterCov,
stretch = "diffContactCenterCov",
x = xCoord,
)
xCoord += objectPitch[self.diffContactCenter] + 0.0025
obj = self.createGate( x=xCoord, terminal="G")
if self.gateContactRight:
self.createGateCont(
gateRect = obj,
coverage = self.gateContactRightCov,
stretch = "gateContactRightCov",
terminal = "G",
)
# Create rightmost diffusion contact
xCoord += objectPitch[self.diffContactRight]# + 0.0025
obj = self.createSourceDrain(
diffusionType = "right",
withContact = self.diffContactRight,
coverage = self.diffContactRightCov,
stretch = "diffContactRightCov",
x = xCoord,
terminal = "D",
)
dbox.right = obj.getBBox(self.diffusion).getRight()
# Create overall diffusion box
Rect(
self.diffusion,
Box( dbox.left, dbox.bottom, dbox.right, dbox.top)
)
# Create implant box, to overlap diffusion rather than whole cell
Rect(
self.implant,
Box( dbox.left, dbox.bottom, dbox.right, dbox.top)
)
Rect(
self.well,
Box( dbox.left - 0.055, dbox.bottom - 0.055, dbox.right + 0.055, dbox.top + 0.055 )
)
Rect(
self.alt,
Box( dbox.left - 0.055, dbox.bottom - 0.055, dbox.right + 0.055, dbox.top + 0.055 )
)
# Create other outline layers
all = Grouping( "all", self.getComps())
        # all.add( self.fgAddEnclosingRects( all, self.encLayers))
        # fgAddEnclosingRects wasn't working here; replaced with the explicit
        # rectangles above.
# Setting the origin is important.
# Avoid shifting of instance locations during auto-abutment.
# Correctly track mouse motion during stretching.
all.moveBy( -origin.x, -origin.y)
@classmethod
def unitTest( cls, paramsMaker, lib, cell, view, ignoreError=True):
"""Test single instance or specific method of the PyCell.
"""
# Note: Pass in paramMaker so parameters are constructed in
# the correct tech context (within the current DloGen).
def unitTestMethod( self):
"""Define how to build the unit test.
"""
# Get default parameters from specs, then update
# with explicitly supplied specs for unitTest.
specs = ParamSpecArray()
self.defineParamSpecs( specs)
params = ParamArray( specs)
params.update( paramsMaker())
print
print( "Creating design: %s" % repr(self))
print( " using technology: %s" % self.tech.id())
print( " by %s.generate(%r)" % (self.__class__.__name__, params))
specs.verify( params)
self.generate( params)
self.save()
try:
cls.withNewDlo( unitTestMethod, lib, cell, view)
except:
if ignoreError:
# Error messages go to debug log
print
print( "Exception caught.")
traceback.print_exc()
else:
raise
class Nmos_vtg( MosfetTemplate):
"""Define Nmos class to implement NMOS MOS transistors.
"""
implant = "nimplant"
class Pmos_vtg( MosfetTemplate):
"""Define Nmos class to implement PMOS MOS transistors.
"""
implant = "pimplant"
########################################################################
#
# End
#
########################################################################
###############################################################################
#
# Define self-tests
#
###############################################################################
if __name__ == "__main__":
def smalltest( self):
"""Create layout instances for quick development debugging.
"""
i = 0
x = 0
y = 0
param = ParamArray(
w = 0.6,
l = 0.18,
fingers = 1,
diffContactLeft = True,
diffContactLeftCov = 0.7,
gateContactLeft = False,
gateContactLeftCov = 0.7,
diffContactCenter = False,
diffContactCenterCov = 0.5,
gateContactCenter = False,
gateContactCenterCov = 0.5,
diffContactRight = False,
diffContactRightCov = 1.0,
gateContactRight = True,
gateContactRightCov = 1.0,
)
for master in [ "nmos_vtg", "pmos_vtg"]:
inst = Instance(("%s" % master), param, None, ("I%d" % i))
inst.setOrigin( Point( x,y))
i += 1
if (i % 4):
x += 10
else:
x = 0
y += 10
param = ParamArray(
w = 2.0,
l = 1.5,
fingers = 1,
diffContactLeft = True,
diffContactLeftCov = 0.3,
gateContactLeft = True,
gateContactLeftCov = 0.3,
diffContactCenter = True,
diffContactCenterCov = 0.5,
gateContactCenter = True,
gateContactCenterCov = 0.5,
diffContactRight = True,
diffContactRightCov = 0.7,
gateContactRight = True,
gateContactRightCov = 0.7,
)
for master in [ "nmos_vtg", "pmos_vtg"]:
inst = Instance(("%s" % master), param, None, ("I%d" % i))
inst.setOrigin( Point( x,y))
i += 1
if (i % 4):
x += 10
else:
x = 0
y += 10
param = ParamArray(
w = 2.0,
l = 1.5,
fingers = 2,
diffContactLeft = True,
diffContactLeftCov = 0.3,
gateContactLeft = True,
gateContactLeftCov = 0.3,
diffContactCenter = True,
diffContactCenterCov = 0.5,
gateContactCenter = True,
gateContactCenterCov = 0.5,
diffContactRight = True,
diffContactRightCov = 1.0,
gateContactRight = True,
gateContactRightCov = 1.0,
)
for master in [ "nmos_vtg", "pmos_vtg"]:
inst = Instance(("%s" % master), param, None, ("I%d" % i))
inst.setOrigin( Point( x,y))
i += 1
if (i % 4):
x += 10
else:
x = 0
y += 10
param = ParamArray(
w = 2.0,
l = 1.5,
fingers = 2,
diffContactLeft = False,
diffContactLeftCov = 1.0,
gateContactLeft = True,
gateContactLeftCov = 1.0,
diffContactCenter = False,
diffContactCenterCov = 0.5,
gateContactCenter = True,
gateContactCenterCov = 0.6,
diffContactRight = True,
diffContactRightCov = 0.4,
gateContactRight = False,
gateContactRightCov = 0.4,
)
for master in [ "nmos_vtg", "pmos_vtg"]:
inst = Instance(("%s" % master), param, None, ("I%d" % i))
inst.setOrigin( Point( x,y))
i += 1
if (i % 4):
x += 10
else:
x = 0
y += 20
self.save()
def bigtest( self):
"""Create layout instances for comprehensive testing, such as DRC or
regression testing.
"""
i = 0
x = 0
y = 0
for w in [ 0.09, 2.0]:
for l in [ 0.05, 1.0]:
for fingers in [ 1, 2]:
for diffContactLeftCov in [ 0.0, 0.33, 1.0]:
for gateContactLeftCov in [ 0.0, 0.33, 1.0]:
for diffContactCenterCov in [ 0.0, 0.33, 1.0]:
for gateContactCenterCov in [ 0.0, 0.33, 1.0]:
for diffContactRightCov in [ 0.0, 0.33, 1.0]:
for gateContactRightCov in [ 0.0, 0.33, 1.0]:
param = ParamArray(
w = w,
l = l,
fingers = fingers,
diffContactLeft = (not diffContactLeftCov),
diffContactLeftCov = diffContactLeftCov,
gateContactLeft = (not gateContactLeftCov),
gateContactLeftCov = gateContactLeftCov,
diffContactCenter = (not diffContactCenterCov),
diffContactCenterCov = diffContactCenterCov,
gateContactCenter = (not gateContactCenterCov),
gateContactCenterCov = gateContactCenterCov,
diffContactRight = (not diffContactRightCov),
diffContactRightCov = diffContactRightCov,
gateContactRight = (not gateContactRightCov),
gateContactRightCov = gateContactRightCov,
)
for master in [ "nmos_vtg", "pmos_vtg"]:
inst = Instance(("%s" % master), param, None, ("I%d" % i))
inst.setOrigin( Point( x,y))
i += 1
if (i % 100):
x += 20
else:
x = 0
y += 20
print("Total number of instances created: %d" % i)
self.save()
# TEST is defined externally from this file.
# For building the test cases, invoke like this:
# cnpy -c "TEST='SMALL';execfile('Mosfet.py')"
if "TEST" in vars():
if vars()["TEST"] == "SMALL":
MosfetTemplate.unitTest(lambda: ParamArray(), "MyPyCellLib", "UNITTEST_Mosfet", "layout")
DloGen.withNewDlo( smalltest, "MyPyCellLib", "SMALLTEST_Mosfet", "layout")
elif vars()["TEST"] == "BIG":
DloGen.withNewDlo( bigtest, "MyPyCellLib", "BIGTEST_Mosfet", "layout")
else:
DloGen.withNewDlo( smalltest, "MyPyCellLib", "SMALLTEST_Mosfet", "layout")
# end
| {
"content_hash": "22fc236eb842e357d3ff0027c5720b08",
"timestamp": "",
"source": "github",
"line_count": 1006,
"max_line_length": 135,
"avg_line_length": 31.77037773359841,
"alnum_prop": 0.5115296767935922,
"repo_name": "seyedmaysamlavasani/GorillaPP",
"id": "4f3746b72e594fe9ef76a0adac6281d49e9956d5",
"size": "34679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/multiProtocolNpu/build/synthesis/asic/FreePDK45/ncsu_basekit/techfile/cni/pycells/Mosfet_vtg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "129274"
},
{
"name": "Awk",
"bytes": "934"
},
{
"name": "C",
"bytes": "200144"
},
{
"name": "C++",
"bytes": "789600"
},
{
"name": "CSS",
"bytes": "4650"
},
{
"name": "Coq",
"bytes": "74091"
},
{
"name": "HCL",
"bytes": "53864"
},
{
"name": "HTML",
"bytes": "1658579"
},
{
"name": "Java",
"bytes": "1094612"
},
{
"name": "JavaScript",
"bytes": "4851"
},
{
"name": "Lex",
"bytes": "1473"
},
{
"name": "Makefile",
"bytes": "42775"
},
{
"name": "Matlab",
"bytes": "19158"
},
{
"name": "Perl",
"bytes": "256294"
},
{
"name": "Python",
"bytes": "296561"
},
{
"name": "Scala",
"bytes": "1520079"
},
{
"name": "Scheme",
"bytes": "1346"
},
{
"name": "Shell",
"bytes": "10426"
},
{
"name": "SourcePawn",
"bytes": "26063"
},
{
"name": "SystemVerilog",
"bytes": "53486"
},
{
"name": "Tcl",
"bytes": "150498"
},
{
"name": "TeX",
"bytes": "257086"
},
{
"name": "VHDL",
"bytes": "974866"
},
{
"name": "Verilog",
"bytes": "18224595"
},
{
"name": "VimL",
"bytes": "31141"
},
{
"name": "XSLT",
"bytes": "21507"
},
{
"name": "Yacc",
"bytes": "2336"
}
],
"symlink_target": ""
} |
import random
class RandomRiver:
def __init__(self, points_part=0.1):
self.points_part = points_part
def generate(self, map_obj):
random.seed(map_obj.seed)
# calculate Corner.downslope and Corner.downslope_edge
for corner in map_obj.corners:
downslope = corner
for neighbour in corner.adjacent:
if neighbour.elevation <= downslope.elevation:
downslope = neighbour
corner.downslope = downslope
for edge in corner.protrudes:
if downslope in edge.corners:
corner.downslope_edge = edge
break
# TODO: calculate watersheds https://github.com/amitp/mapgen2/blob/master/Map.as#L605
# Do we really need this?
# generate rivers
points = int(self.points_part * len(map_obj.points))
for _ in range(points):
corner = random.choice(map_obj.corners)
if corner.ocean or corner.river or corner.elevation < 0.3 or corner.elevation > 0.9 \
or corner.downslope == corner:
continue
# move river to ocean or lake
while not corner.water:
corner.river += 1
corner.downslope_edge.river += 1
corner = corner.downslope
# fix river value of estuary
corner.river += 1
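# Usage sketch; the shape of map_obj is assumed here: it must expose .seed,
# .points, and .corners whose elements carry the elevation/adjacent/
# protrudes/ocean/water/river attributes used above:
#
#     RandomRiver(points_part=0.2).generate(map_obj)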
| {
"content_hash": "608a1f14dae7abbdfe58b7503a9ca918",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 97,
"avg_line_length": 33.904761904761905,
"alnum_prop": 0.5603932584269663,
"repo_name": "Alerion/fantasy_map",
"id": "4fc0afd7bb328994f76ae4436ca3d24a0a7b0c8c",
"size": "1424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "map/generators/rivers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2316"
},
{
"name": "Makefile",
"bytes": "357"
},
{
"name": "Python",
"bytes": "71210"
}
],
"symlink_target": ""
} |
from .dev import * # noqa: F401
# Here be settings used for tests
| {
"content_hash": "509f22bba76486fd063514e42eb81d75",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 33,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.6911764705882353,
"repo_name": "Alien1993/bank-reader",
"id": "38193f32b564a7d1f769f6f3236c7e19e8ccfa91",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-bank-reader/bank-reader/settings/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3851"
},
{
"name": "HTML",
"bytes": "1649"
},
{
"name": "JavaScript",
"bytes": "13647"
},
{
"name": "Lua",
"bytes": "1894"
},
{
"name": "Python",
"bytes": "28014"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipe', '0028_auto_20181016_1349'),
]
operations = [
migrations.AlterField(
model_name='recipetype',
name='name',
field=models.CharField(max_length=50, unique=True),
),
]
| {
"content_hash": "062494565247116d0ca4905be2d4ca33",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 21.77777777777778,
"alnum_prop": 0.5969387755102041,
"repo_name": "ngageoint/scale",
"id": "af3373e3f9df3108ae71bde4e0c422b7fb611c5d",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scale/recipe/migrations/0029_auto_20181016_1431.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7219"
},
{
"name": "CSS",
"bytes": "12193"
},
{
"name": "Dockerfile",
"bytes": "14853"
},
{
"name": "HCL",
"bytes": "301"
},
{
"name": "HTML",
"bytes": "48818"
},
{
"name": "JavaScript",
"bytes": "503"
},
{
"name": "Makefile",
"bytes": "5852"
},
{
"name": "Python",
"bytes": "5295677"
},
{
"name": "Shell",
"bytes": "26650"
}
],
"symlink_target": ""
} |
import datetime
import sys
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from .. import _serialization
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
class AvailableOperations(_serialization.Model):
"""Available operations of the service.
:ivar value: Collection of available operation details.
:vartype value: list[~azure.mgmt.healthbot.models.OperationDetail]
:ivar next_link: URL client should use to fetch the next page (per server side paging).
It's null for now, added for future use.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[OperationDetail]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.OperationDetail"]] = None, next_link: Optional[str] = None, **kwargs
):
"""
:keyword value: Collection of available operation details.
:paramtype value: list[~azure.mgmt.healthbot.models.OperationDetail]
:keyword next_link: URL client should use to fetch the next page (per server side paging).
It's null for now, added for future use.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class BotResponseList(_serialization.Model):
"""The list of Azure Health Bot operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar next_link: The link used to get the next page of bot service resources.
:vartype next_link: str
:ivar value: Gets the list of Azure Health Bot results and their properties.
:vartype value: list[~azure.mgmt.healthbot.models.HealthBot]
"""
_validation = {
"next_link": {"readonly": True},
"value": {"readonly": True},
}
_attribute_map = {
"next_link": {"key": "nextLink", "type": "str"},
"value": {"key": "value", "type": "[HealthBot]"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.next_link = None
self.value = None
class Error(_serialization.Model):
"""The resource management error response.
:ivar error: The error object.
:vartype error: ~azure.mgmt.healthbot.models.ErrorError
"""
_attribute_map = {
"error": {"key": "error", "type": "ErrorError"},
}
def __init__(self, *, error: Optional["_models.ErrorError"] = None, **kwargs):
"""
:keyword error: The error object.
:paramtype error: ~azure.mgmt.healthbot.models.ErrorError
"""
super().__init__(**kwargs)
self.error = error
class ErrorAdditionalInfo(_serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: JSON
"""
_validation = {
"type": {"readonly": True},
"info": {"readonly": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"info": {"key": "info", "type": "object"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.type = None
self.info = None
class ErrorError(_serialization.Model):
"""The error object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.healthbot.models.Error]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~azure.mgmt.healthbot.models.ErrorAdditionalInfo]
"""
_validation = {
"code": {"readonly": True},
"message": {"readonly": True},
"target": {"readonly": True},
"details": {"readonly": True},
"additional_info": {"readonly": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
"target": {"key": "target", "type": "str"},
"details": {"key": "details", "type": "[Error]"},
"additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class Resource(_serialization.Model):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.healthbot.models.SystemData
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.healthbot.models.SystemData
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar location: The geo-location where the resource lives. Required.
:vartype location: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"location": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"tags": {"key": "tags", "type": "{str}"},
"location": {"key": "location", "type": "str"},
}
def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs):
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword location: The geo-location where the resource lives. Required.
:paramtype location: str
"""
super().__init__(**kwargs)
self.tags = tags
self.location = location
class HealthBot(TrackedResource):
"""Azure Health Bot resource definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.healthbot.models.SystemData
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar location: The geo-location where the resource lives. Required.
:vartype location: str
:ivar sku: SKU of the Azure Health Bot. Required.
:vartype sku: ~azure.mgmt.healthbot.models.Sku
:ivar identity: The identity of the Azure Health Bot.
:vartype identity: ~azure.mgmt.healthbot.models.Identity
:ivar properties: The set of properties specific to Azure Health Bot resource.
:vartype properties: ~azure.mgmt.healthbot.models.HealthBotProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"location": {"required": True},
"sku": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"tags": {"key": "tags", "type": "{str}"},
"location": {"key": "location", "type": "str"},
"sku": {"key": "sku", "type": "Sku"},
"identity": {"key": "identity", "type": "Identity"},
"properties": {"key": "properties", "type": "HealthBotProperties"},
}
def __init__(
self,
*,
location: str,
sku: "_models.Sku",
tags: Optional[Dict[str, str]] = None,
identity: Optional["_models.Identity"] = None,
properties: Optional["_models.HealthBotProperties"] = None,
**kwargs
):
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword location: The geo-location where the resource lives. Required.
:paramtype location: str
:keyword sku: SKU of the Azure Health Bot. Required.
:paramtype sku: ~azure.mgmt.healthbot.models.Sku
:keyword identity: The identity of the Azure Health Bot.
:paramtype identity: ~azure.mgmt.healthbot.models.Identity
:keyword properties: The set of properties specific to Azure Health Bot resource.
:paramtype properties: ~azure.mgmt.healthbot.models.HealthBotProperties
"""
super().__init__(tags=tags, location=location, **kwargs)
self.sku = sku
self.identity = identity
self.properties = properties
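# Construction sketch (values are illustrative; Sku and Identity are defined
# elsewhere in this models package):
#
#     bot = HealthBot(
#         location="eastus",
#         sku=Sku(name="F0"),
#         tags={"env": "dev"},
#     )
#
# Read-only fields (id, name, type, system_data) stay None on client-built
# instances and are populated by the server.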
class HealthBotKey(_serialization.Model):
"""An entry of HealthBotKeysResponse.
:ivar key_name: The name of the key.
:vartype key_name: str
:ivar value: The value of the key.
:vartype value: str
"""
_attribute_map = {
"key_name": {"key": "keyName", "type": "str"},
"value": {"key": "value", "type": "str"},
}
def __init__(self, *, key_name: Optional[str] = None, value: Optional[str] = None, **kwargs):
"""
:keyword key_name: The name of the key.
:paramtype key_name: str
:keyword value: The value of the key.
:paramtype value: str
"""
super().__init__(**kwargs)
self.key_name = key_name
self.value = value
class HealthBotKeysResponse(_serialization.Model):
"""Health Bot Keys Response.
:ivar secrets: Array of Azure Health Bot Secrets.
:vartype secrets: list[~azure.mgmt.healthbot.models.HealthBotKey]
"""
_attribute_map = {
"secrets": {"key": "secrets", "type": "[HealthBotKey]"},
}
def __init__(self, *, secrets: Optional[List["_models.HealthBotKey"]] = None, **kwargs):
"""
:keyword secrets: Array of Azure Health Bot Secrets.
:paramtype secrets: list[~azure.mgmt.healthbot.models.HealthBotKey]
"""
super().__init__(**kwargs)
self.secrets = secrets
class HealthBotProperties(_serialization.Model):
"""The properties of a Azure Health Bot. The Health Bot Service is a cloud platform that empowers developers in Healthcare organizations to build and deploy their compliant, AI-powered virtual health assistants and health bots, that help them improve processes and reduce costs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: The provisioning state of the Azure Health Bot resource.
:vartype provisioning_state: str
:ivar bot_management_portal_link: The link.
:vartype bot_management_portal_link: str
:ivar key_vault_properties: KeyVault properties for the resource encryption.
:vartype key_vault_properties: ~azure.mgmt.healthbot.models.KeyVaultProperties
"""
_validation = {
"provisioning_state": {"readonly": True},
"bot_management_portal_link": {"readonly": True},
}
_attribute_map = {
"provisioning_state": {"key": "provisioningState", "type": "str"},
"bot_management_portal_link": {"key": "botManagementPortalLink", "type": "str"},
"key_vault_properties": {"key": "keyVaultProperties", "type": "KeyVaultProperties"},
}
def __init__(self, *, key_vault_properties: Optional["_models.KeyVaultProperties"] = None, **kwargs):
"""
:keyword key_vault_properties: KeyVault properties for the resource encryption.
:paramtype key_vault_properties: ~azure.mgmt.healthbot.models.KeyVaultProperties
"""
super().__init__(**kwargs)
self.provisioning_state = None
self.bot_management_portal_link = None
self.key_vault_properties = key_vault_properties
class HealthBotUpdateParameters(_serialization.Model):
"""Parameters for updating a Azure Health Bot.
:ivar properties: Properties of Azure Health Bot.
:vartype properties: ~azure.mgmt.healthbot.models.HealthBotProperties
    :ivar tags: Tags for an Azure Health Bot.
:vartype tags: dict[str, str]
:ivar sku: SKU of the Azure Health Bot.
:vartype sku: ~azure.mgmt.healthbot.models.Sku
:ivar identity: The identity of the Azure Health Bot.
:vartype identity: ~azure.mgmt.healthbot.models.Identity
    :ivar location: The geo-location where the resource lives.
:vartype location: str
"""
_attribute_map = {
"properties": {"key": "properties", "type": "HealthBotProperties"},
"tags": {"key": "tags", "type": "{str}"},
"sku": {"key": "sku", "type": "Sku"},
"identity": {"key": "identity", "type": "Identity"},
"location": {"key": "location", "type": "str"},
}
def __init__(
self,
*,
properties: Optional["_models.HealthBotProperties"] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["_models.Sku"] = None,
identity: Optional["_models.Identity"] = None,
location: Optional[str] = None,
**kwargs
):
"""
:keyword properties: Properties of Azure Health Bot.
:paramtype properties: ~azure.mgmt.healthbot.models.HealthBotProperties
    :keyword tags: Tags for an Azure Health Bot.
:paramtype tags: dict[str, str]
:keyword sku: SKU of the Azure Health Bot.
:paramtype sku: ~azure.mgmt.healthbot.models.Sku
:keyword identity: The identity of the Azure Health Bot.
:paramtype identity: ~azure.mgmt.healthbot.models.Identity
    :keyword location: The geo-location where the resource lives.
:paramtype location: str
"""
super().__init__(**kwargs)
self.properties = properties
self.tags = tags
self.sku = sku
self.identity = identity
self.location = location
class Identity(_serialization.Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal ID of resource identity. This property will only be provided
for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of resource. This property will only be provided for a system
assigned identity.
:vartype tenant_id: str
:ivar type: The identity type. The type 'SystemAssigned, UserAssigned' includes both an
implicitly created identity and a set of user assigned identities. The type 'None' will remove
any identities from the Azure Health Bot. Known values are: "SystemAssigned", "UserAssigned",
"SystemAssigned, UserAssigned", and "None".
:vartype type: str or ~azure.mgmt.healthbot.models.ResourceIdentityType
:ivar user_assigned_identities: The list of user identities associated with the resource. The
user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:vartype user_assigned_identities: dict[str, ~azure.mgmt.healthbot.models.UserAssignedIdentity]
"""
_validation = {
"principal_id": {"readonly": True},
"tenant_id": {"readonly": True},
}
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"tenant_id": {"key": "tenantId", "type": "str"},
"type": {"key": "type", "type": "str"},
"user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentity}"},
}
def __init__(
self,
*,
type: Optional[Union[str, "_models.ResourceIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, "_models.UserAssignedIdentity"]] = None,
**kwargs
):
"""
:keyword type: The identity type. The type 'SystemAssigned, UserAssigned' includes both an
implicitly created identity and a set of user assigned identities. The type 'None' will remove
any identities from the Azure Health Bot. Known values are: "SystemAssigned", "UserAssigned",
"SystemAssigned, UserAssigned", and "None".
:paramtype type: str or ~azure.mgmt.healthbot.models.ResourceIdentityType
:keyword user_assigned_identities: The list of user identities associated with the resource.
The user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:paramtype user_assigned_identities: dict[str,
~azure.mgmt.healthbot.models.UserAssignedIdentity]
"""
super().__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
class KeyVaultProperties(_serialization.Model):
"""Properties of the key vault.
All required parameters must be populated in order to send to Azure.
:ivar key_name: The name of the key vault key. Required.
:vartype key_name: str
:ivar key_version: The version of the key vault key.
:vartype key_version: str
    :ivar key_vault_uri: The URI of the key vault. Required.
:vartype key_vault_uri: str
:ivar user_identity: The user assigned identity (ARM resource id) that has access to the key.
:vartype user_identity: str
"""
_validation = {
"key_name": {"required": True},
"key_vault_uri": {"required": True},
}
_attribute_map = {
"key_name": {"key": "keyName", "type": "str"},
"key_version": {"key": "keyVersion", "type": "str"},
"key_vault_uri": {"key": "keyVaultUri", "type": "str"},
"user_identity": {"key": "userIdentity", "type": "str"},
}
def __init__(
self,
*,
key_name: str,
key_vault_uri: str,
key_version: Optional[str] = None,
user_identity: Optional[str] = None,
**kwargs
):
"""
:keyword key_name: The name of the key vault key. Required.
:paramtype key_name: str
:keyword key_version: The version of the key vault key.
:paramtype key_version: str
    :keyword key_vault_uri: The URI of the key vault. Required.
:paramtype key_vault_uri: str
:keyword user_identity: The user assigned identity (ARM resource id) that has access to the
key.
:paramtype user_identity: str
"""
super().__init__(**kwargs)
self.key_name = key_name
self.key_version = key_version
self.key_vault_uri = key_vault_uri
self.user_identity = user_identity
class OperationDetail(_serialization.Model):
"""Operation detail payload.
:ivar name: Name of the operation.
:vartype name: str
:ivar is_data_action: Indicates whether the operation is a data action.
:vartype is_data_action: bool
:ivar display: Display of the operation.
:vartype display: ~azure.mgmt.healthbot.models.OperationDisplay
:ivar origin: Origin of the operation.
:vartype origin: str
:ivar properties: Additional properties.
:vartype properties: JSON
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"is_data_action": {"key": "isDataAction", "type": "bool"},
"display": {"key": "display", "type": "OperationDisplay"},
"origin": {"key": "origin", "type": "str"},
"properties": {"key": "properties", "type": "object"},
}
def __init__(
self,
*,
name: Optional[str] = None,
is_data_action: Optional[bool] = None,
display: Optional["_models.OperationDisplay"] = None,
origin: Optional[str] = None,
properties: Optional[JSON] = None,
**kwargs
):
"""
:keyword name: Name of the operation.
:paramtype name: str
:keyword is_data_action: Indicates whether the operation is a data action.
:paramtype is_data_action: bool
:keyword display: Display of the operation.
:paramtype display: ~azure.mgmt.healthbot.models.OperationDisplay
:keyword origin: Origin of the operation.
:paramtype origin: str
:keyword properties: Additional properties.
:paramtype properties: JSON
"""
super().__init__(**kwargs)
self.name = name
self.is_data_action = is_data_action
self.display = display
self.origin = origin
self.properties = properties
class OperationDisplay(_serialization.Model):
"""Operation display payload.
:ivar provider: Resource provider of the operation.
:vartype provider: str
:ivar resource: Resource of the operation.
:vartype resource: str
:ivar operation: Localized friendly name for the operation.
:vartype operation: str
:ivar description: Localized friendly description for the operation.
:vartype description: str
"""
_attribute_map = {
"provider": {"key": "provider", "type": "str"},
"resource": {"key": "resource", "type": "str"},
"operation": {"key": "operation", "type": "str"},
"description": {"key": "description", "type": "str"},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword provider: Resource provider of the operation.
:paramtype provider: str
:keyword resource: Resource of the operation.
:paramtype resource: str
:keyword operation: Localized friendly name for the operation.
:paramtype operation: str
:keyword description: Localized friendly description for the operation.
:paramtype description: str
"""
super().__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class Sku(_serialization.Model):
"""The resource model definition representing SKU.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the Azure Health Bot SKU. Required. Known values are: "F0", "S1", and
"C0".
:vartype name: str or ~azure.mgmt.healthbot.models.SkuName
"""
_validation = {
"name": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
}
def __init__(self, *, name: Union[str, "_models.SkuName"], **kwargs):
"""
:keyword name: The name of the Azure Health Bot SKU. Required. Known values are: "F0", "S1",
and "C0".
:paramtype name: str or ~azure.mgmt.healthbot.models.SkuName
"""
super().__init__(**kwargs)
self.name = name
class SystemData(_serialization.Model):
"""Read only system data.
:ivar created_by: The identity that created the resource.
:vartype created_by: str
:ivar created_by_type: The type of identity that created the resource. Known values are:
"User", "Application", "ManagedIdentity", and "Key".
:vartype created_by_type: str or ~azure.mgmt.healthbot.models.IdentityType
:ivar created_at: The timestamp of resource creation (UTC).
:vartype created_at: ~datetime.datetime
:ivar last_modified_by: The identity that last modified the resource.
:vartype last_modified_by: str
:ivar last_modified_by_type: The type of identity that last modified the resource. Known values
are: "User", "Application", "ManagedIdentity", and "Key".
:vartype last_modified_by_type: str or ~azure.mgmt.healthbot.models.IdentityType
:ivar last_modified_at: The timestamp of resource last modification (UTC).
:vartype last_modified_at: ~datetime.datetime
"""
_attribute_map = {
"created_by": {"key": "createdBy", "type": "str"},
"created_by_type": {"key": "createdByType", "type": "str"},
"created_at": {"key": "createdAt", "type": "iso-8601"},
"last_modified_by": {"key": "lastModifiedBy", "type": "str"},
"last_modified_by_type": {"key": "lastModifiedByType", "type": "str"},
"last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "_models.IdentityType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "_models.IdentityType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
"""
:keyword created_by: The identity that created the resource.
:paramtype created_by: str
:keyword created_by_type: The type of identity that created the resource. Known values are:
"User", "Application", "ManagedIdentity", and "Key".
:paramtype created_by_type: str or ~azure.mgmt.healthbot.models.IdentityType
:keyword created_at: The timestamp of resource creation (UTC).
:paramtype created_at: ~datetime.datetime
:keyword last_modified_by: The identity that last modified the resource.
:paramtype last_modified_by: str
:keyword last_modified_by_type: The type of identity that last modified the resource. Known
values are: "User", "Application", "ManagedIdentity", and "Key".
:paramtype last_modified_by_type: str or ~azure.mgmt.healthbot.models.IdentityType
:keyword last_modified_at: The timestamp of resource last modification (UTC).
:paramtype last_modified_at: ~datetime.datetime
"""
super().__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
class UserAssignedIdentity(_serialization.Model):
"""The details of the user assigned managed identity used by the Video Analyzer resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal ID of user assigned identity.
:vartype principal_id: str
:ivar client_id: The client ID of user assigned identity.
:vartype client_id: str
"""
_validation = {
"principal_id": {"readonly": True},
"client_id": {"readonly": True},
}
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"client_id": {"key": "clientId", "type": "str"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.principal_id = None
self.client_id = None
class ValidationResult(_serialization.Model):
"""The response returned from validation process.
:ivar status: The status code of the response validation.
:vartype status: str
"""
_attribute_map = {
"status": {"key": "status", "type": "str"},
}
def __init__(self, *, status: Optional[str] = None, **kwargs):
"""
:keyword status: The status code of the response validation.
:paramtype status: str
"""
super().__init__(**kwargs)
self.status = status
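# Illustrative usage only -- a minimal sketch, not part of the generated SDK.
# It assumes the TrackedResource base class defined earlier in this module
# stores ``location`` and ``tags``; the region, SKU name, and tag values are
# hypothetical placeholders.
if __name__ == "__main__":
    bot = HealthBot(
        location="eastus",
        sku=Sku(name="F0"),
        identity=Identity(type="SystemAssigned"),
        tags={"env": "dev"},
    )
    print(bot.location, bot.sku.name, bot.identity.type)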
| {
"content_hash": "3c31dfc9a2b0854cdd9d610b0eef0599",
"timestamp": "",
"source": "github",
"line_count": 815,
"max_line_length": 282,
"avg_line_length": 36.74723926380368,
"alnum_prop": 0.6204547731142943,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2d352e7ea640239523b719d3cdec4dbc3c027b37",
"size": "30450",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/healthbot/azure-mgmt-healthbot/azure/mgmt/healthbot/models/_models_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import contextlib
import logging
import os
from teuthology import misc as teuthology
from teuthology import packaging
from teuthology.orchestra import run
log = logging.getLogger(__name__)
def _get_builder_project(ctx, remote, config):
return packaging.get_builder_project()(
config.get('project', 'ceph'),
config,
remote=remote,
ctx=ctx
)
def _get_local_dir(config, remote):
"""
Extract local directory name from the task lists.
Copy files over to the remote site.
"""
ldir = config.get('local', None)
if ldir:
remote.run(args=['sudo', 'mkdir', '-p', ldir])
for fyle in os.listdir(ldir):
fname = "%s/%s" % (ldir, fyle)
teuthology.sudo_write_file(
remote, fname, open(fname).read(), '644')
return ldir
def get_flavor(config):
"""
    Determine the build flavor to use: 'local', 'gcov', 'notcmalloc', or 'basic'.
"""
config = config or dict()
flavor = config.get('flavor', 'basic')
if config.get('path'):
# local dir precludes any other flavors
flavor = 'local'
else:
if config.get('valgrind'):
flavor = 'notcmalloc'
else:
if config.get('coverage'):
flavor = 'gcov'
return flavor
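# Illustrative outcomes of the precedence implemented above:
#   get_flavor({})                    -> 'basic'
#   get_flavor({'path': '/some/dir'}) -> 'local'  (a local dir wins over all)
#   get_flavor({'valgrind': True})    -> 'notcmalloc'
#   get_flavor({'coverage': True})    -> 'gcov'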
def _ship_utilities(ctx):
"""
Write a copy of valgrind.supp to each of the remote sites. Set executables
    used by Ceph in /usr/bin. When finished (upon exit of the teuthology
run), remove these files.
:param ctx: Context
"""
testdir = teuthology.get_testdir(ctx)
filenames = []
log.info('Shipping valgrind.supp...')
assert 'suite_path' in ctx.config
try:
with open(
os.path.join(ctx.config['suite_path'], 'valgrind.supp'),
'rb'
) as f:
fn = os.path.join(testdir, 'valgrind.supp')
filenames.append(fn)
for rem in ctx.cluster.remotes.keys():
teuthology.sudo_write_file(
remote=rem,
path=fn,
data=f,
)
f.seek(0)
except IOError as e:
        log.info('Cannot ship suppression file for valgrind: %s...', e.strerror)
FILES = ['daemon-helper', 'adjust-ulimits']
destdir = '/usr/bin'
for filename in FILES:
log.info('Shipping %r...', filename)
src = os.path.join(os.path.dirname(__file__), filename)
dst = os.path.join(destdir, filename)
filenames.append(dst)
with open(src, 'rb') as f:
for rem in ctx.cluster.remotes.keys():
teuthology.sudo_write_file(
remote=rem,
path=dst,
data=f,
)
f.seek(0)
rem.run(
args=[
'sudo',
'chmod',
'a=rx',
'--',
dst,
],
)
return filenames
def _remove_utilities(ctx, filenames):
"""
Remove the shipped utilities.
:param ctx: Context
:param filenames: The utilities install paths
"""
log.info('Removing shipped files: %s...', ' '.join(filenames))
if filenames == []:
return
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm',
'-f',
'--',
] + list(filenames),
wait=False,
),
)
@contextlib.contextmanager
def ship_utilities(ctx, config):
"""
Ship utilities during the first call, and skip it in the following ones.
See also `_ship_utilities`.
:param ctx: Context
:param config: Configuration
"""
assert config is None
do_ship_utilities = ctx.get('do_ship_utilities', True)
if do_ship_utilities:
ctx['do_ship_utilities'] = False
filenames = _ship_utilities(ctx)
try:
yield
finally:
_remove_utilities(ctx, filenames)
else:
log.info('Utilities already shipped, skip it...')
yield
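# Illustrative usage only (a sketch; the surrounding teuthology task plumbing
# and a populated ``ctx`` are assumed): ship the helpers for the duration of
# a block, removing them afterwards.
#
#   with ship_utilities(ctx, None):
#       ...  # run tasks that rely on daemon-helper / adjust-ulimits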
| {
"content_hash": "64dec173fca6324b56d3db02fd40ebf8",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 79,
"avg_line_length": 27.15032679738562,
"alnum_prop": 0.5185363505055368,
"repo_name": "SUSE/teuthology",
"id": "dce71ec2b6e28ec7f64a049bbe470c9357e54f8e",
"size": "4154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teuthology/task/install/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1096"
},
{
"name": "Makefile",
"bytes": "4194"
},
{
"name": "Python",
"bytes": "1439804"
},
{
"name": "Shell",
"bytes": "61271"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "speedycrew.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "99348e8b09b0bd44bde30768113241f2",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7142857142857143,
"repo_name": "michaelmaguire/twosidedsearch",
"id": "7245e49c080d70d6710a5c850c4cfebc2886d43d",
"size": "253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/speedy-api/manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11293"
},
{
"name": "C",
"bytes": "1025"
},
{
"name": "C#",
"bytes": "1116"
},
{
"name": "C++",
"bytes": "15670"
},
{
"name": "CSS",
"bytes": "205983"
},
{
"name": "DIGITAL Command Language",
"bytes": "350"
},
{
"name": "DTrace",
"bytes": "163"
},
{
"name": "HTML",
"bytes": "21143"
},
{
"name": "Java",
"bytes": "896770"
},
{
"name": "JavaScript",
"bytes": "234576"
},
{
"name": "Makefile",
"bytes": "446"
},
{
"name": "Objective-C",
"bytes": "254924"
},
{
"name": "Objective-C++",
"bytes": "82369"
},
{
"name": "PLpgSQL",
"bytes": "63248"
},
{
"name": "Python",
"bytes": "127787"
},
{
"name": "Shell",
"bytes": "33556"
}
],
"symlink_target": ""
} |
"""
Fork of pynxos library from network to code and mzbenami
Reimplemented by ktbyers to support XML-RPC in addition to JSON-RPC
"""
from __future__ import print_function, unicode_literals
from typing import List, Optional, Any, Union
from napalm.nxapi_plumbing.errors import NXAPIError, NXAPICommandError
from napalm.nxapi_plumbing.api_client import RPCClient, XMLClient, RPCBase
class Device(object):
def __init__(
self,
host: str,
username: str,
password: str,
transport: str = "http",
api_format: str = "jsonrpc",
port: Optional[int] = None,
timeout: int = 30,
verify: bool = True,
):
self.host = host
self.username = username
self.password = password
self.transport = transport
self.api_format = api_format
self.verify = verify
self.port = port
self.api: RPCBase
if api_format == "xml":
self.api = XMLClient(
host,
username,
password,
transport=transport,
port=port,
timeout=timeout,
verify=verify,
)
elif api_format == "jsonrpc":
self.api = RPCClient(
host,
username,
password,
transport=transport,
port=port,
timeout=timeout,
verify=verify,
)
def show(self, command: str, raw_text: bool = False) -> Any:
"""Send a non-configuration command.
Args:
command (str): The command to send to the device.
Keyword Args:
raw_text (bool): Whether to return raw text or structured data.
Returns:
The output of the show command, which could be raw text or structured data.
"""
commands = [command]
result = self.show_list(commands, raw_text)
if len(result) > 1:
raise NXAPIError(
"Length of response inconsistent with number of commands executed."
)
# Return the only entry or the empty response
if result:
if self.api_format == "jsonrpc":
return result[0]["result"]
elif self.api_format == "xml":
return result[0]
return result
def show_list(self, commands: List[str], raw_text: bool = False) -> List[Any]:
"""Send a list of non-configuration commands.
Args:
commands (list): A list of commands to send to the device.
Keyword Args:
raw_text (bool): Whether to return raw text or structured data.
Returns:
A list of outputs for each show command
"""
cmd_method = self.api.cmd_method_raw if raw_text else self.api.cmd_method
return self.api._nxapi_command(commands, method=cmd_method)
def config(self, command: str) -> Union[str, List]:
"""Send a configuration command.
Args:
command (str): The command to send to the device.
Raises:
NXAPICommandError: If there is a problem with the supplied command.
"""
commands = [command]
result = self.config_list(commands)
if len(result) > 1:
raise NXAPIError(
"Length of response inconsistent with number of commands executed."
)
# Return the only entry or the empty response
if result:
if self.api_format == "jsonrpc":
return result[0]["result"]
elif self.api_format == "xml":
return result[0]
return result
def config_list(self, commands: List[str]) -> List[Any]:
"""Send a list of configuration commands.
Args:
commands (list): A list of commands to send to the device.
Raises:
NXAPICommandError: If there is a problem with one of the commands in the list.
"""
return self.api._nxapi_command_conf(commands)
def save(self, filename: str = "startup-config") -> bool:
"""Save a device's running configuration.
Args:
filename (str): The filename on the remote device.
If none is supplied, the implementing class should
save to the "startup configuration".
"""
try:
cmd = "copy run {}".format(filename)
self.show(cmd, raw_text=True)
except NXAPICommandError as e:
if "overwrite" in e.message:
return False
raise
return True
def rollback(self, filename: str) -> None:
"""Rollback to a checkpoint file.
Args:
filename (str): The filename of the checkpoint file to load into the running
configuration.
"""
cmd = "rollback running-config file {}".format(filename)
self.show(cmd, raw_text=True)
def checkpoint(self, filename: str) -> None:
"""Save a checkpoint of the running configuration to the device.
Args:
filename (str): The filename to save the checkpoint as on the remote device.
"""
self.show_list(
["terminal dont-ask", "checkpoint file {}".format(filename)], raw_text=True
)
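if __name__ == "__main__":
    # Illustrative usage only: the host and credentials below are
    # hypothetical placeholders for an NX-OS device with NX-API enabled.
    device = Device(
        host="nxos-switch.example.com",
        username="admin",
        password="admin",
        transport="https",
        api_format="jsonrpc",
        verify=False,
    )
    print(device.show("show hostname", raw_text=True))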
| {
"content_hash": "7e1ae64091412e94f5ac8c8a506e4049",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 90,
"avg_line_length": 31.191860465116278,
"alnum_prop": 0.557502329916123,
"repo_name": "napalm-automation/napalm",
"id": "3f2ac5f5bcb84d669184fa0b427908b9160bb23b",
"size": "5365",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm/nxapi_plumbing/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "324"
},
{
"name": "Jinja",
"bytes": "17789"
},
{
"name": "Makefile",
"bytes": "117"
},
{
"name": "Python",
"bytes": "1142188"
},
{
"name": "Roff",
"bytes": "931"
},
{
"name": "Smarty",
"bytes": "14010"
}
],
"symlink_target": ""
} |
# Prefer the ToscaWidgets2 widgets; fall back to the ToscaWidgets1 widgets
# when tw2 is not installed.
try: #pragma: no cover
from .tw2widgets.widgets import *
except ImportError: #pragma: no cover
from .tw1widgets.widgets import * | {
"content_hash": "98ddb1b8e509390f6b73577eafb2bc60",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 37,
"avg_line_length": 34,
"alnum_prop": 0.7352941176470589,
"repo_name": "gjhiggins/sprox",
"id": "0e67c57c41fcc7f97b0ed015ff6417608ff0b3c6",
"size": "136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sprox/widgets/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "158"
},
{
"name": "Python",
"bytes": "342035"
}
],
"symlink_target": ""
} |
from .. import base
from girder.models.model_base import AccessControlledModel, Model, AccessType
from girder.utility.model_importer import ModelImporter
class FakeAcModel(AccessControlledModel):
def initialize(self):
self.name = 'fake_ac'
self.exposeFields(level=AccessType.READ, fields='read')
self.exposeFields(level=AccessType.WRITE, fields=('write', 'write2'))
self.exposeFields(level=AccessType.ADMIN, fields='admin')
self.exposeFields(level=AccessType.SITE_ADMIN, fields='sa')
def validate(self, doc):
return doc
class FakeModel(Model):
def initialize(self):
self.name = 'fake'
self.exposeFields(level=AccessType.READ, fields='read')
self.exposeFields(level=AccessType.SITE_ADMIN, fields='sa')
def validate(self, doc):
return doc
def setUpModule():
base.startServer()
def tearDownModule():
base.stopServer()
class FilterTestCase(base.TestCase):
"""
Unit test the model filtering utilities.
"""
def setUp(self):
base.TestCase.setUp(self)
users = ({
'email': 'good@email.com',
'login': 'goodlogin',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword'
}, {
'email': 'regularuser@email.com',
'login': 'regularuser',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword'
})
self.admin, self.user = [
self.model('user').createUser(**user) for user in users]
ModelImporter.registerModel('fake_ac', FakeAcModel())
ModelImporter.registerModel('fake', FakeModel())
def testModelFiltering(self):
fields = {
'hidden': 1,
'read': 1,
'write': 1,
'write2': 1,
'admin': 1,
'sa': 1
}
# Test filter behavior on access controlled model
fakeAc = self.model('fake_ac').save(fields)
fakeAc = self.model('fake_ac').setUserAccess(
fakeAc, self.user, level=AccessType.READ)
filtered = self.model('fake_ac').filter(fakeAc, self.admin)
self.assertTrue('sa' in filtered)
self.assertTrue('write' in filtered)
self.assertFalse('hidden' in filtered)
self.model('fake_ac').exposeFields(
level=AccessType.READ, fields='hidden')
filtered = self.model('fake_ac').filter(fakeAc, self.user)
self.assertTrue('hidden' in filtered)
self.assertTrue('read' in filtered)
self.assertFalse('write' in filtered)
self.assertFalse('admin' in filtered)
self.assertFalse('sa' in filtered)
self.model('fake_ac').hideFields(level=AccessType.READ, fields='read')
fakeAc = self.model('fake_ac').setUserAccess(
fakeAc, self.user, level=AccessType.ADMIN)
filtered = self.model('fake_ac').filter(fakeAc, self.user)
self.assertTrue('hidden' in filtered)
self.assertTrue('write' in filtered)
self.assertTrue('admin' in filtered)
self.assertFalse('read' in filtered)
self.assertFalse('sa' in filtered)
# Test Model implementation
fake = self.model('fake').save(fields)
filtered = self.model('fake').filter(fake, self.user)
self.assertEqual(filtered, {'read': 1, '_modelType': 'fake'})
filtered = self.model('fake').filter(fake, self.admin)
self.assertEqual(filtered, {
'read': 1,
'sa': 1,
'_modelType': 'fake'
})
| {
"content_hash": "0e8de9ca2d2738151ffab0ac32346bce",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 31.224137931034484,
"alnum_prop": 0.5949751518498068,
"repo_name": "chrismattmann/girder",
"id": "7c0a67144928f45fc971fe6642bb2a777f0c21dc",
"size": "4416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/cases/filter_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "36635"
},
{
"name": "CSS",
"bytes": "156740"
},
{
"name": "HTML",
"bytes": "161646"
},
{
"name": "JavaScript",
"bytes": "1358011"
},
{
"name": "Mako",
"bytes": "1483"
},
{
"name": "Python",
"bytes": "1202964"
},
{
"name": "Ruby",
"bytes": "9923"
},
{
"name": "Shell",
"bytes": "3298"
}
],
"symlink_target": ""
} |
"""
RPyC **registry server** implementation. The registry is much like
`Avahi <http://en.wikipedia.org/wiki/Avahi_(software)>`_ or
`Bonjour <http://en.wikipedia.org/wiki/Bonjour_(software)>`_, but tailored to
the needs of RPyC. Also, neither of them supports (or supported) Windows,
and Bonjour has a restrictive license. Moreover, they are too "powerful" for
what RPyC needed and required too complex a setup.
If anyone wants to implement the RPyC registry using Avahi, Bonjour, or any
other zeroconf implementation -- I'll be happy to include them.
Refer to :file:`rpyc/scripts/rpyc_registry.py` for more info.
"""
import sys
import socket
import time
import logging
from rpyc.core import brine
DEFAULT_PRUNING_TIMEOUT = 4 * 60
MAX_DGRAM_SIZE = 1500
REGISTRY_PORT = 18811
#------------------------------------------------------------------------------
# servers
#------------------------------------------------------------------------------
class RegistryServer(object):
"""Base registry server"""
def __init__(self, listenersock, pruning_timeout = None, logger = None):
self.sock = listenersock
self.port = self.sock.getsockname()[1]
self.active = False
self.services = {}
if pruning_timeout is None:
pruning_timeout = DEFAULT_PRUNING_TIMEOUT
self.pruning_timeout = pruning_timeout
if logger is None:
logger = self._get_logger()
self.logger = logger
def _get_logger(self):
raise NotImplementedError()
def on_service_added(self, name, addrinfo):
"""called when a new service joins the registry (but not on keepalives).
override this to add custom logic"""
def on_service_removed(self, name, addrinfo):
"""called when a service unregisters or is pruned.
override this to add custom logic"""
def _add_service(self, name, addrinfo):
"""updates the service's keep-alive time stamp"""
if name not in self.services:
self.services[name] = {}
is_new = addrinfo not in self.services[name]
self.services[name][addrinfo] = time.time()
if is_new:
try:
self.on_service_added(name, addrinfo)
except Exception:
self.logger.exception('error executing service add callback')
def _remove_service(self, name, addrinfo):
"""removes a single server of the given service"""
self.services[name].pop(addrinfo, None)
if not self.services[name]:
del self.services[name]
try:
self.on_service_removed(name, addrinfo)
except Exception:
self.logger.exception('error executing service remove callback')
def cmd_query(self, host, name):
"""implementation of the ``query`` command"""
name = name.upper()
self.logger.debug("querying for %r", name)
if name not in self.services:
self.logger.debug("no such service")
return ()
oldest = time.time() - self.pruning_timeout
all_servers = sorted(self.services[name].items(), key = lambda x: x[1])
servers = []
for addrinfo, t in all_servers:
if t < oldest:
self.logger.debug("discarding stale %s:%s", *addrinfo)
self._remove_service(name, addrinfo)
else:
servers.append(addrinfo)
self.logger.debug("replying with %r", servers)
return tuple(servers)
def cmd_register(self, host, names, port):
"""implementation of the ``register`` command"""
self.logger.debug("registering %s:%s as %s", host, port, ", ".join(names))
for name in names:
self._add_service(name.upper(), (host, port))
return "OK"
def cmd_unregister(self, host, port):
"""implementation of the ``unregister`` command"""
self.logger.debug("unregistering %s:%s", host, port)
        for name in list(self.services.keys()):
self._remove_service(name, (host, port))
return "OK"
def _recv(self):
raise NotImplementedError()
def _send(self, data, addrinfo):
raise NotImplementedError()
def _work(self):
while self.active:
try:
data, addrinfo = self._recv()
except (socket.error, socket.timeout):
continue
try:
magic, cmd, args = brine.load(data)
except Exception:
continue
if magic != "RPYC":
self.logger.warn("invalid magic: %r", magic)
continue
cmdfunc = getattr(self, "cmd_%s" % (cmd.lower(),), None)
if not cmdfunc:
self.logger.warn("unknown command: %r", cmd)
continue
try:
reply = cmdfunc(addrinfo[0], *args)
except Exception:
self.logger.exception('error executing function')
else:
self._send(brine.dump(reply), addrinfo)
def start(self):
"""Starts the registry server (blocks)"""
if self.active:
raise ValueError("server is already running")
if self.sock is None:
raise ValueError("object disposed")
self.logger.debug("server started on %s:%s", *self.sock.getsockname()[:2])
try:
self.active = True
self._work()
except KeyboardInterrupt:
self.logger.warn("User interrupt!")
finally:
self.active = False
self.logger.debug("server closed")
self.sock.close()
self.sock = None
def close(self):
"""Closes (terminates) the registry server"""
if not self.active:
raise ValueError("server is not running")
self.logger.debug("stopping server...")
self.active = False
class UDPRegistryServer(RegistryServer):
"""UDP-based registry server. The server listens to UDP broadcasts and
    answers them. Useful in local networks, where broadcasts are allowed"""
TIMEOUT = 1.0
def __init__(self, host = "0.0.0.0", port = REGISTRY_PORT, pruning_timeout = None, logger = None):
family, socktype, proto, _, sockaddr = socket.getaddrinfo(host, port, 0,
socket.SOCK_DGRAM)[0]
sock = socket.socket(family, socktype, proto)
sock.bind(sockaddr)
sock.settimeout(self.TIMEOUT)
RegistryServer.__init__(self, sock, pruning_timeout = pruning_timeout,
logger = logger)
def _get_logger(self):
return logging.getLogger("REGSRV/UDP/%d" % (self.port,))
def _recv(self):
return self.sock.recvfrom(MAX_DGRAM_SIZE)
def _send(self, data, addrinfo):
try:
self.sock.sendto(data, addrinfo)
except (socket.error, socket.timeout):
pass
class TCPRegistryServer(RegistryServer):
"""TCP-based registry server. The server listens to a certain TCP port and
answers requests. Useful when you need to cross routers in the network, since
they block UDP broadcasts"""
TIMEOUT = 3.0
def __init__(self, host = "0.0.0.0", port = REGISTRY_PORT, pruning_timeout = None,
logger = None, reuse_addr = True):
family, socktype, proto, _, sockaddr = socket.getaddrinfo(host, port, 0,
socket.SOCK_STREAM)[0]
sock = socket.socket(family, socktype, proto)
if reuse_addr and sys.platform != "win32":
# warning: reuseaddr is not what you expect on windows!
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(sockaddr)
sock.listen(10)
sock.settimeout(self.TIMEOUT)
RegistryServer.__init__(self, sock, pruning_timeout = pruning_timeout,
logger = logger)
self._connected_sockets = {}
def _get_logger(self):
return logging.getLogger("REGSRV/TCP/%d" % (self.port,))
def _recv(self):
sock2, _ = self.sock.accept()
addrinfo = sock2.getpeername()
data = sock2.recv(MAX_DGRAM_SIZE)
self._connected_sockets[addrinfo] = sock2
return data, addrinfo
def _send(self, data, addrinfo):
sock2 = self._connected_sockets.pop(addrinfo)
try:
sock2.send(data)
except (socket.error, socket.timeout):
pass
finally:
sock2.close()
#------------------------------------------------------------------------------
# clients (registrars)
#------------------------------------------------------------------------------
class RegistryClient(object):
"""Base registry client. Also known as **registrar**"""
REREGISTER_INTERVAL = 60
def __init__(self, ip, port, timeout, logger = None):
self.ip = ip
self.port = port
self.timeout = timeout
if logger is None:
logger = self._get_logger()
self.logger = logger
def _get_logger(self):
raise NotImplementedError()
def discover(self, name):
"""Sends a query for the specified service name.
:param name: the service name (or one of its aliases)
:returns: a list of ``(host, port)`` tuples
"""
raise NotImplementedError()
def register(self, aliases, port):
"""Registers the given service aliases with the given TCP port. This
API is intended to be called only by an RPyC server.
:param aliases: the :class:`service's <rpyc.core.service.Service>` aliases
:param port: the listening TCP port of the server
"""
raise NotImplementedError()
def unregister(self, port):
"""Unregisters the given RPyC server. This API is intended to be called
only by an RPyC server.
:param port: the listening TCP port of the RPyC server to unregister
"""
raise NotImplementedError()
class UDPRegistryClient(RegistryClient):
"""UDP-based registry clients. By default, it sends UDP broadcasts (requires
special user privileges on certain OS's) and collects the replies. You can
also specify the IP address to send to.
Example::
registrar = UDPRegistryClient()
list_of_servers = registrar.discover("foo")
.. note::
Consider using :func:`rpyc.utils.factory.discover` instead
"""
def __init__(self, ip = "255.255.255.255", port = REGISTRY_PORT, timeout = 2,
bcast = None, logger = None, ipv6 = False):
RegistryClient.__init__(self, ip = ip, port = port, timeout = timeout,
logger = logger)
if ipv6:
self.sock_family = socket.AF_INET6
self.bcast = False
else:
self.sock_family = socket.AF_INET
if bcast is None:
bcast = "255" in ip.split(".")
self.bcast = bcast
def _get_logger(self):
return logging.getLogger('REGCLNT/UDP')
def discover(self, name):
sock = socket.socket(self.sock_family, socket.SOCK_DGRAM)
try:
if self.bcast:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
data = brine.dump(("RPYC", "QUERY", (name,)))
sock.sendto(data, (self.ip, self.port))
sock.settimeout(self.timeout)
try:
data, _ = sock.recvfrom(MAX_DGRAM_SIZE)
except (socket.error, socket.timeout):
servers = ()
else:
servers = brine.load(data)
finally:
sock.close()
return servers
def register(self, aliases, port, interface = ""):
self.logger.info("registering on %s:%s", self.ip, self.port)
sock = socket.socket(self.sock_family, socket.SOCK_DGRAM)
sock.bind((interface, 0))
try:
if self.bcast:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
data = brine.dump(("RPYC", "REGISTER", (aliases, port)))
sock.sendto(data, (self.ip, self.port))
tmax = time.time() + self.timeout
while time.time() < tmax:
sock.settimeout(tmax - time.time())
try:
data, address = sock.recvfrom(MAX_DGRAM_SIZE)
rip, rport = address[:2]
except socket.timeout:
self.logger.warn("no registry acknowledged")
return False
if rport != self.port:
continue
try:
reply = brine.load(data)
except Exception:
continue
if reply == "OK":
self.logger.info("registry %s:%s acknowledged", rip, rport)
return True
else:
self.logger.warn("no registry acknowledged")
return False
finally:
sock.close()
def unregister(self, port):
self.logger.info("unregistering from %s:%s", self.ip, self.port)
sock = socket.socket(self.sock_family, socket.SOCK_DGRAM)
try:
if self.bcast:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
data = brine.dump(("RPYC", "UNREGISTER", (port,)))
sock.sendto(data, (self.ip, self.port))
finally:
sock.close()
class TCPRegistryClient(RegistryClient):
"""TCP-based registry client. You must specify the host (registry server)
to connect to.
Example::
registrar = TCPRegistryClient("localhost")
list_of_servers = registrar.discover("foo")
.. note::
Consider using :func:`rpyc.utils.factory.discover` instead
"""
def __init__(self, ip, port = REGISTRY_PORT, timeout = 2, logger = None):
RegistryClient.__init__(self, ip = ip, port = port, timeout = timeout,
logger = logger)
def _get_logger(self):
return logging.getLogger('REGCLNT/TCP')
def discover(self, name):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
try:
data = brine.dump(("RPYC", "QUERY", (name,)))
sock.connect((self.ip, self.port))
sock.send(data)
try:
data = sock.recv(MAX_DGRAM_SIZE)
except (socket.error, socket.timeout):
servers = ()
else:
servers = brine.load(data)
finally:
sock.close()
return servers
def register(self, aliases, port, interface = ""):
self.logger.info("registering on %s:%s", self.ip, self.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((interface, 0))
sock.settimeout(self.timeout)
data = brine.dump(("RPYC", "REGISTER", (aliases, port)))
try:
try:
sock.connect((self.ip, self.port))
sock.send(data)
except (socket.error, socket.timeout):
self.logger.warn("could not connect to registry")
return False
try:
data = sock.recv(MAX_DGRAM_SIZE)
except socket.timeout:
self.logger.warn("registry did not acknowledge")
return False
try:
reply = brine.load(data)
except Exception:
self.logger.warn("received corrupted data from registry")
return False
if reply == "OK":
self.logger.info("registry %s:%s acknowledged", self.ip, self.port)
return True
finally:
sock.close()
def unregister(self, port):
self.logger.info("unregistering from %s:%s", self.ip, self.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
try:
data = brine.dump(("RPYC", "UNREGISTER", (port,)))
try:
sock.connect((self.ip, self.port))
sock.send(data)
except (socket.error, socket.timeout):
self.logger.warn("could not connect to registry")
finally:
sock.close()
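if __name__ == "__main__":
    # Illustrative usage only: run a UDP registry server with the defaults
    # above (all interfaces, port 18811); start() blocks until interrupted.
    logging.basicConfig(level=logging.DEBUG)
    UDPRegistryServer().start()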
| {
"content_hash": "fdd45ce65e00d738b75333971ae4b119",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 102,
"avg_line_length": 35.556277056277054,
"alnum_prop": 0.5578011809825287,
"repo_name": "gleon99/rpyc",
"id": "711ed2327df90866622b0759303308e2209502bb",
"size": "16427",
"binary": false,
"copies": "8",
"ref": "refs/heads/logs",
"path": "rpyc/utils/registry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "272404"
}
],
"symlink_target": ""
} |
import collections
XMLNS_11 = "http://docs.openstack.org/compute/api/v1.1"
XMLNS_V3 = "http://docs.openstack.org/compute/api/v1.1"
# NOTE(danms): This is just a silly implementation to help make generating
# XML faster for prototyping. Could be replaced with proper etree gorp
# if desired
class Element(object):
def __init__(self, element_name, *args, **kwargs):
self.element_name = element_name
self._attrs = kwargs
self._elements = list(args)
def add_attr(self, name, value):
self._attrs[name] = value
def append(self, element):
self._elements.append(element)
def __str__(self):
args = " ".join(['%s="%s"' %
(k, v if v is not None else "")
for k, v in self._attrs.items()])
string = '<%s %s' % (self.element_name, args)
if not self._elements:
string += '/>'
return string
string += '>'
for element in self._elements:
string += str(element)
string += '</%s>' % self.element_name
return string
def __getitem__(self, name):
for element in self._elements:
if element.element_name == name:
return element
raise KeyError("No such element `%s'" % name)
def __getattr__(self, name):
if name in self._attrs:
return self._attrs[name]
        raise AttributeError(name)
def attributes(self):
return self._attrs.items()
def children(self):
return self._elements
class Document(Element):
def __init__(self, *args, **kwargs):
if 'version' not in kwargs:
kwargs['version'] = '1.0'
if 'encoding' not in kwargs:
kwargs['encoding'] = 'UTF-8'
Element.__init__(self, '?xml', *args, **kwargs)
def __str__(self):
args = " ".join(['%s="%s"' %
(k, v if v is not None else "")
for k, v in self._attrs.items()])
string = '<?xml %s?>\n' % args
for element in self._elements:
string += str(element)
return string
class Text(Element):
def __init__(self, content=""):
Element.__init__(self, None)
self.__content = content
def __str__(self):
return self.__content
def xml_to_json(node):
"""This does a really braindead conversion of an XML tree to
something that looks like a json dump. In cases where the XML
    and json structures are the same, this "just works". In
others, it requires a little hand-editing of the result.
"""
json = {}
for attr in node.keys():
if not attr.startswith("xmlns"):
json[attr] = node.get(attr)
if not node.getchildren():
return node.text or json
for child in node.getchildren():
tag = child.tag
if tag.startswith("{"):
ns, tag = tag.split("}", 1)
json[tag] = xml_to_json(child)
return json
def deep_dict_to_xml(dest, source):
"""Populates the ``dest`` xml element with the ``source`` ``Mapping``
elements, if the source Mapping's value is also a ``Mapping``
they will be recursively added as a child elements.
:param source: A python ``Mapping`` (dict)
:param dest: XML child element will be added to the ``dest``
"""
for element, content in source.iteritems():
if isinstance(content, collections.Mapping):
xml_element = Element(element)
deep_dict_to_xml(xml_element, content)
dest.append(xml_element)
else:
dest.append(Element(element, content))
| {
"content_hash": "c99c151d35a5cd3b977ca21c54aafd9c",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 74,
"avg_line_length": 31.084745762711865,
"alnum_prop": 0.5627044711014176,
"repo_name": "adkerr/tempest",
"id": "860dd5b4d20ffe972fd459757daa6ae514093621",
"size": "4339",
"binary": false,
"copies": "3",
"ref": "refs/heads/netapp/akerr",
"path": "tempest/services/compute/xml/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1855736"
},
{
"name": "Shell",
"bytes": "5748"
}
],
"symlink_target": ""
} |
def register_all():
# NOTE(danms): You must make sure your object gets imported in this
# function in order for it to be registered by services that may
# need to receive it via RPC.
__import__('nova.objects.agent')
__import__('nova.objects.aggregate')
__import__('nova.objects.bandwidth_usage')
__import__('nova.objects.block_device')
__import__('nova.objects.compute_node')
__import__('nova.objects.dns_domain')
__import__('nova.objects.ec2')
__import__('nova.objects.external_event')
__import__('nova.objects.fixed_ip')
__import__('nova.objects.flavor')
__import__('nova.objects.floating_ip')
__import__('nova.objects.hv_spec')
__import__('nova.objects.instance')
__import__('nova.objects.instance_action')
__import__('nova.objects.instance_fault')
__import__('nova.objects.instance_group')
__import__('nova.objects.instance_info_cache')
__import__('nova.objects.instance_numa_topology')
__import__('nova.objects.instance_pci_requests')
__import__('nova.objects.keypair')
__import__('nova.objects.migration')
__import__('nova.objects.network')
__import__('nova.objects.network_request')
__import__('nova.objects.numa')
__import__('nova.objects.pci_device')
__import__('nova.objects.pci_device_pool')
__import__('nova.objects.tag')
__import__('nova.objects.quotas')
__import__('nova.objects.security_group')
__import__('nova.objects.security_group_rule')
__import__('nova.objects.service')
__import__('nova.objects.virt_cpu_topology')
__import__('nova.objects.virtual_interface')
| {
"content_hash": "5b60a6a45c229a48ec412ec23407ddad",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 71,
"avg_line_length": 43.945945945945944,
"alnum_prop": 0.6439114391143912,
"repo_name": "shakamunyi/nova",
"id": "6ab4ee0fad6a311f66ab7ca26f9bd019c65774ff",
"size": "2612",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/objects/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15322211"
},
{
"name": "Shell",
"bytes": "17730"
},
{
"name": "Smarty",
"bytes": "489682"
}
],
"symlink_target": ""
} |
from sequana.enrichment.mart import Mart
import pytest
@pytest.mark.xfail(reason="too slow or service may be down")
def test_mart():
conv = Mart(dataset="mmusculus_gene_ensembl")
# you could choose hsapiens_gene_ensembl for instance
df = conv.query()
df.set_index("ensembl_gene_id")
# conv.save(df)
| {
"content_hash": "c4685523577776f52827dcee9b26e6f9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 60,
"avg_line_length": 27.083333333333332,
"alnum_prop": 0.6984615384615385,
"repo_name": "sequana/sequana",
"id": "72c322ea6f92b1b0b6e38100918c49908f76392f",
"size": "325",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/enrichment/test_mart.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6314"
},
{
"name": "Dockerfile",
"bytes": "1693"
},
{
"name": "HTML",
"bytes": "5379"
},
{
"name": "JavaScript",
"bytes": "686"
},
{
"name": "Jupyter Notebook",
"bytes": "1990042"
},
{
"name": "Python",
"bytes": "1509148"
},
{
"name": "R",
"bytes": "60806"
},
{
"name": "Shell",
"bytes": "2553"
},
{
"name": "Singularity",
"bytes": "4235"
}
],
"symlink_target": ""
} |
"""Support for SimpliSafe alarm control panels."""
import logging
import re
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.const import (
CONF_CODE, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DATA_CLIENT, DOMAIN, TOPIC_UPDATE
_LOGGER = logging.getLogger(__name__)
ATTR_ALARM_ACTIVE = 'alarm_active'
ATTR_TEMPERATURE = 'temperature'
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up a SimpliSafe alarm control panel based on existing config."""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up a SimpliSafe alarm control panel based on a config entry."""
systems = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id]
async_add_entities([
SimpliSafeAlarm(system, entry.data.get(CONF_CODE))
for system in systems
], True)
class SimpliSafeAlarm(alarm.AlarmControlPanel):
"""Representation of a SimpliSafe alarm."""
def __init__(self, system, code):
"""Initialize the SimpliSafe alarm."""
self._async_unsub_dispatcher_connect = None
self._attrs = {}
self._code = code
self._system = system
self._state = None
@property
def unique_id(self):
"""Return the unique ID."""
return self._system.system_id
@property
def name(self):
"""Return the name of the device."""
return self._system.address
@property
def code_format(self):
"""Return one or more digits/characters."""
if not self._code:
return None
if isinstance(self._code, str) and re.search('^\\d+$', self._code):
return alarm.FORMAT_NUMBER
return alarm.FORMAT_TEXT
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attrs
def _validate_code(self, code, state):
"""Validate given code."""
check = self._code is None or code == self._code
if not check:
_LOGGER.warning("Wrong code entered for %s", state)
return check
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self.async_schedule_update_ha_state(True)
self._async_unsub_dispatcher_connect = async_dispatcher_connect(
self.hass, TOPIC_UPDATE, update)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect dispatcher listener when removed."""
if self._async_unsub_dispatcher_connect:
self._async_unsub_dispatcher_connect()
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
if not self._validate_code(code, 'disarming'):
return
await self._system.set_off()
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
if not self._validate_code(code, 'arming home'):
return
await self._system.set_home()
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
if not self._validate_code(code, 'arming away'):
return
await self._system.set_away()
async def async_update(self):
"""Update alarm status."""
from simplipy.system import SystemStates
self._attrs[ATTR_ALARM_ACTIVE] = self._system.alarm_going_off
if self._system.temperature:
self._attrs[ATTR_TEMPERATURE] = self._system.temperature
if self._system.state == SystemStates.error:
return
if self._system.state == SystemStates.off:
self._state = STATE_ALARM_DISARMED
elif self._system.state in (SystemStates.home,
SystemStates.home_count):
self._state = STATE_ALARM_ARMED_HOME
elif self._system.state in (SystemStates.away, SystemStates.away_count,
SystemStates.exit_delay):
self._state = STATE_ALARM_ARMED_AWAY
else:
self._state = None
| {
"content_hash": "291a798c725127ca8d467cc67c46ac78",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 79,
"avg_line_length": 31.94927536231884,
"alnum_prop": 0.6182807892946246,
"repo_name": "aequitas/home-assistant",
"id": "2cbe5632b6b5d0b7b6fec2523420b6d90079507e",
"size": "4409",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/simplisafe/alarm_control_panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15601734"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('spidergraphs.views',
url(r'^$','index'),
url(r'^(?P<institution>[a-z_]+)$','institution'),
url(r'^(?P<institution>[a-z_]+)/(?P<program_id>[\d]+)$','program'),
url(r'^(?P<institution>[a-z_]+)/(?P<program_id>[\d]+)/(?P<course_id>[\d]+)$','course'),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
)
urlpatterns += patterns('',
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "24c4b4357fad753a68a034eb067cb9c2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 91,
"avg_line_length": 39.888888888888886,
"alnum_prop": 0.649025069637883,
"repo_name": "LaneCommunityCollege/dqpprototype",
"id": "df017ddc0a868f7a0b022be27a4894b2b4c771c4",
"size": "718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dqp/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "211072"
},
{
"name": "Python",
"bytes": "32722"
}
],
"symlink_target": ""
} |
class FlankingRegionCollection(object):
"""
    Used to store information about genomic regions that are 'flanking', meaning they are not really
    involved in the structural variant; use this to identify reads mapping to the flanking regions
    to provide more context for events and to better categorize ambiguously mapping reads (i.e. those
    that map in the flanks vs those that don't really match either allele)
"""
def __init__(self, variant):
self.variant = variant
commonSegmentIDs = variant.commonSegments()
# allele -> part -> segment-bounds
self.alleleFlanks = {}
for allele in ["ref", "alt"]:
self.alleleFlanks[allele] = AlleleFlankingRegion(variant, allele, commonSegmentIDs)
def isFlanking(self, alignmentSet, allele):
return self.alleleFlanks[allele].isFlanking(alignmentSet)
class AlleleFlankingRegion(object):
def __init__(self, variant, allele, commonSegmentIDs):
self.partsToFlankingRegions = {}
for part in variant.chromParts(allele):
curpos = 0
self.partsToFlankingRegions[part.id] = []
for segment in part.segments:
end = curpos + len(segment) - 1
if segment.id in commonSegmentIDs:
flankingRegion = {"part":part.id, "segment":segment.id, "start":curpos, "end":end}
self.partsToFlankingRegions[part.id].append(flankingRegion)
curpos = end + 1
    def isFlanking(self, alignmentSet):
        alignments = alignmentSet.getAlignments()
        segments = set()
        partIDs = set()
        for alignment in alignments:
            partID = alignment.regionID
            partIDs.add(partID)
            # reads hitting more than one chromosome part can't be purely flanking
            if len(partIDs) > 1:
                return False
            # note which common (shared) segment, if any, fully contains this alignment
            for flankingRegion in self.partsToFlankingRegions[partID]:
                if flankingRegion["start"] < alignment.start and alignment.end < flankingRegion["end"]:
                    segments.add(flankingRegion["segment"])
            # alignments spread across multiple flanking segments span the event itself
            if len(segments) > 1: return False
        # flanking only when every alignment landed inside a single common segment
        if len(segments) == 1:
            return True
        return False
| {
"content_hash": "dffc58b322b2473dcfe313169e5e2d9f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 103,
"avg_line_length": 35.03225806451613,
"alnum_prop": 0.6206261510128913,
"repo_name": "gatoravi/svviz",
"id": "68441d1bd42bac2391b338a769aa8a469c005ff8",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/svviz/flanking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "35516"
},
{
"name": "CSS",
"bytes": "1393"
},
{
"name": "HTML",
"bytes": "6173"
},
{
"name": "JavaScript",
"bytes": "24343"
},
{
"name": "Python",
"bytes": "210456"
}
],
"symlink_target": ""
} |
import pytest
import doctest
from insights.parsers.hostname import Hostname as HnF
from insights import SkipComponent
from insights.parsers.lssap import Lssap
from insights.parsers.saphostctrl import SAPHostCtrlInstances
from insights.combiners import sap
from insights.combiners.sap import Sap
from insights.combiners.hostname import Hostname
from insights.tests import context_wrap
from insights.tests.parsers.test_saphostctrl import SAPHOSTCTRL_HOSTINSTANCES_DOCS, SAPHOSTCTRL_HOSTINSTANCES_GOOD
Lssap_nw_TEST = """
- lssap version 1.0 -
==========================================
SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE
HA2| 16| D16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D16/exe
HA2| 22| D22| lu0416|749, patch 10, changelist 1698137| /usr/sap/HA2/D22/exe
HA2| 50| D50| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D50/exe
HA2| 51| D51| lu0418|749, patch 10, changelist 1698137| /usr/sap/HA2/D51/exe
HA2| 52| D52| lu0418|749, patch 10, changelist 1698137| /usr/sap/HA2/D52/exe
==========================================
""".strip()
Lssap_hana_TEST = """
- lssap version 1.0 -
==========================================
SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE
HA2| 16| HDB16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/HDB16/exe
==========================================
""".strip()
Lssap_ascs_TEST = """
- lssap version 1.0 -
==========================================
SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE
HA2| 16| ASCS16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/ASCS16/exe
==========================================
""".strip()
Lssap_all_TEST = """
- lssap version 1.0 -
==========================================
SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE
HA2| 16| D16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D16/exe
HA2| 22| D22| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D22/exe
HA2| 16| HDB16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/HDB16/exe
HA2| 16| ASCS16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/ASCS16/exe
==========================================
""".strip()
Lssap_doc_TEST = """
- lssap version 1.0 -
==========================================
SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE
HA2| 16| D16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D16/exe
HA2| 22| D22| lu0418|749, patch 10, changelist 1698137| /usr/sap/HA2/D22/exe
HA2| 16| HDB16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/HDB16/exe
==========================================
""".strip()
HOSTNAME = 'lu0417.example.com'
HOSTNAME1 = 'li-ld-1810.example.com'
SAPHOSTCTRL_HOSTINSTANCES_R_CASE = '''
*********************************************************
SID , String , R4D
SystemNumber , String , 12
InstanceName , String , DVEBMGS12
InstanceType , String , Primary Application Server
Hostname , String , r4d00
FullQualifiedHostname , String , r4d00.example.corp
SapVersionInfo , String , 753, patch 501, changelist 1967207
*********************************************************
SID , String , R4D
SystemNumber , String , 10
InstanceName , String , ASCS10
InstanceType , String , ABAP Central Services
Hostname , String , r4d01
FullQualifiedHostname , String , r4d01.example.corp
SapVersionInfo , String , 753, patch 501, changelist 1967207
*********************************************************
SID , String , WDX
SystemNumber , String , 20
InstanceName , String , W20
InstanceType , String , WebDispatcher
Hostname , String , r4d02
FullQualifiedHostname , String , host_97.example.corp
SapVersionInfo , String , 773, patch 121, changelist 1917131
*********************************************************
SID , String , SMD
SystemNumber , String , 98
InstanceName , String , SMDA98
InstanceType , String , Solution Manager Diagnostic Agent
Hostname , String , r4d01
FullQualifiedHostname , String , host_97.example.corp
SapVersionInfo , String , 745, patch 400, changelist 1734487
*********************************************************
SID , String , SMD
SystemNumber , String , 97
InstanceName , String , SMDA97
InstanceType , String , Solution Manager Diagnostic Agent
Hostname , String , r4d00
FullQualifiedHostname , String , host_97.example.corp
SapVersionInfo , String , 745, patch 400, changelist 1734487
'''
HOSTNAME2 = 'host_97.example.corp'
def test_lssap_netweaver():
lssap = Lssap(context_wrap(Lssap_nw_TEST))
hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None)
sap = Sap(hn, None, lssap)
assert sap['D50'].number == '50'
assert 'D16' in sap.local_instances
assert 'D51' in sap.all_instances
assert 'D51' not in sap.local_instances
assert sap.is_netweaver is True
assert sap.is_hana is False
assert sap.is_ascs is False
def test_saphostcrtl_hana():
lssap = Lssap(context_wrap(Lssap_nw_TEST))
inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_DOCS))
hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None)
sap = Sap(hn, inst, lssap)
assert 'D50' not in sap
assert sap.local_instances == ['HDB88']
assert 'HDB90' in sap.all_instances
assert sap['HDB88'].number == '88'
assert sap['HDB90'].hostname == 'lu0418'
assert sap['HDB90'].version == '749, patch 211, changelist 1754007'
assert sap.number('HDB90') == '90'
assert sap.sid('HDB88') == 'D89'
assert sap.is_netweaver is False
assert sap.is_hana is True
assert sap.is_ascs is False
def test_saphostcrtl_hana_2():
lssap = Lssap(context_wrap(Lssap_all_TEST))
inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_GOOD))
hn = Hostname(HnF(context_wrap(HOSTNAME1)), None, None, None)
sap = Sap(hn, inst, lssap)
assert 'D50' not in sap
assert 'HDB00' in sap
assert sorted(sap.local_instances) == sorted(['HDB88', 'HDB90', 'SMDA91', 'HDB62', 'HDB00'])
assert sorted(sap.all_instances) == sorted([
'ASCS07', 'ASCS52', 'D54', 'DVEBMGS09', 'ERS08', 'HDB00', 'HDB62',
'HDB88', 'HDB90', 'SCS10', 'SMDA91'])
assert sorted(sap.business_instances) == sorted([
'ASCS07', 'ASCS52', 'D54', 'DVEBMGS09', 'ERS08', 'HDB00', 'HDB62',
'HDB88', 'HDB90', 'SCS10'])
assert sorted(sap.function_instances) == sorted(['SMDA91'])
assert sap['HDB88'].number == '88'
assert sap['HDB90'].hostname == 'li-ld-1810'
assert sap['DVEBMGS09'].version == '749, patch 301, changelist 1779613'
assert sap.version('HDB90') == '749, patch 211, changelist 1754007'
assert sap.hostname('HDB62') == 'd62dbsrv'
assert sap.type('SCS10') == 'SCS'
assert sap.full_type('SCS10') == 'Java Central Services'
assert sap.is_netweaver is True
assert sap.is_hana is True
assert sap.is_ascs is True
def test_lssap_hana():
lssap = Lssap(context_wrap(Lssap_hana_TEST))
hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None)
sap = Sap(hn, None, lssap)
assert 'D50' not in sap
assert sap.is_netweaver is False
assert sap.is_hana is True
assert sap.is_ascs is False
def test_lssap_ascs():
lssap = Lssap(context_wrap(Lssap_ascs_TEST))
hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None)
sap = Sap(hn, None, lssap)
assert sap['ASCS16'].sid == 'HA2'
# ASCS is also a kind of NetWeaver
assert sap.is_netweaver is True
assert sap.is_hana is False
assert sap.is_ascs is True
def test_all():
lssap = Lssap(context_wrap(Lssap_all_TEST))
hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None)
sap = Sap(hn, None, lssap)
assert sap['D16'].version == '749, patch 10, changelist 1698137'
assert sap['ASCS16'].hostname == 'lu0417'
assert sap.is_netweaver is True
assert sap.is_hana is True
assert sap.is_ascs is True
def test_r_case():
saphostctrl = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_R_CASE))
hn = Hostname(HnF(context_wrap(HOSTNAME2)), None, None, None)
sap = Sap(hn, saphostctrl, None)
assert sorted(sap.local_instances) == sorted(['W20', 'SMDA98', 'SMDA97'])
assert sap['DVEBMGS12'].version == '753, patch 501, changelist 1967207'
assert sap['ASCS10'].hostname == 'r4d01'
assert sap.is_netweaver is True
assert sap.is_hana is False
assert sap.is_ascs is True
def test_doc_examples():
env = {
'saps': Sap(
Hostname(HnF(context_wrap(HOSTNAME)), None, None, None),
None,
Lssap(context_wrap(Lssap_doc_TEST))
)
}
failed, total = doctest.testmod(sap, globs=env)
assert failed == 0
def test_ab():
hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None)
with pytest.raises(SkipComponent) as se:
Sap(hn, None, None)
assert 'No SAP instance.' in str(se)
| {
"content_hash": "98e45f6926e846b8453c02cd961d29f7",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 114,
"avg_line_length": 41.030434782608694,
"alnum_prop": 0.5972236939705415,
"repo_name": "RedHatInsights/insights-core",
"id": "b4b7a196b7f285ae47b634b9e8c3cbcb93454b19",
"size": "9437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/tests/combiners/test_sap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "8219046"
},
{
"name": "Shell",
"bytes": "1754"
}
],
"symlink_target": ""
} |
"""Generates a Recamen's sequence."""
import sys
from itertools import count
from itertools import islice
def sequence():
"""Generate Recamen's sequence."""
seen = set()
a = 0
for n in count(1):
yield a
seen.add(a)
c = a - n
if c < 0 or c in seen:
c = a + n
a = c
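# Worked example (follows directly from the rule above): the generator
# yields 0, 1, 3, 6, 2, 7, 13, 20, 12, 21, ... since a - n is taken only
# when it is non-negative and unseen; e.g.
#   list(islice(sequence(), 8)) == [0, 1, 3, 6, 2, 7, 13, 20]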
def write_sequence(filename, num):
"""Write Recamen's sequence to a text file."""
with open(filename, mode='wt', encoding='utf-8') as f:
f.writelines("{0}\n".format(r)
for r in islice(sequence(), num + 1))
if __name__ == "__main__":
write_sequence(sys.argv[1], int(sys.argv[2]))
| {
"content_hash": "b66b0e785c79c46277917c62fcb2a293",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 58,
"avg_line_length": 23.5,
"alnum_prop": 0.5425531914893617,
"repo_name": "carlb15/Python",
"id": "ab706f506443b6b3dc6387c6e23d64d141bb8724",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recaman.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155451"
}
],
"symlink_target": ""
} |
import json
from django import forms
from django.test.utils import override_settings
from django_webtest import WebTest
from . import build_test_urls
class SelectForm(forms.Form):
test_field = forms.ChoiceField(
choices=((None, 'Select Card Type'), ('V', 'Visa'), ('M', 'MasterCard'), ('P', 'Paypal')),
widget=forms.Select(attrs={'data-test': 'Test Attr'}))
data_field = forms.BooleanField(required=False, widget=forms.HiddenInput, initial=True,
help_text='To produce non empty POST for empty test_field')
@override_settings(ROOT_URLCONF=__name__)
class Test(WebTest):
default_form = SelectForm
def test_default_usecase(self):
page = self.app.get(self.test_default_usecase.url)
self.assertIn('id="id_test_field_container"', page.body.decode('utf-8'))
self.assertIn('id="id_test_field"', page.body.decode('utf-8'))
self.assertIn('data-test="Test Attr"', page.body.decode('utf-8'))
form = page.form
self.assertIn('test_field', form.fields)
form['test_field'] = 'V'
response = json.loads(form.submit().body.decode('utf-8'))
self.assertIn('cleaned_data', response)
self.assertIn('test_field', response['cleaned_data'])
self.assertEqual('V', response['cleaned_data']['test_field'])
def test_missing_value_error(self):
form = self.app.get(self.test_missing_value_error.url).form
response = form.submit()
self.assertIn('has-error', response.body.decode('utf-8'))
self.assertIn('This field is required.', response.body.decode('utf-8'))
def test_part_group_class(self):
page = self.app.get(self.test_part_group_class.url)
self.assertIn('class="yellow required select-field col s12"', page.body.decode('utf-8'))
test_part_group_class.template = '''
{% form %}
{% attr form.test_field 'group' class override %}yellow required select-field col s12{% endattr %}
{% endform %}
'''
def test_part_add_group_class(self):
page = self.app.get(self.test_part_add_group_class.url)
self.assertIn('class="select-field col s12 required orange"', page.body.decode('utf-8'))
test_part_add_group_class.template = '''
{% form %}
{% attr form.test_field 'group' class append %}orange{% endattr %}
{% endform %}
'''
def test_part_prefix(self):
response = self.app.get(self.test_part_prefix.url)
self.assertIn('<i class="mdi-communication-email prefix"></i>', response.body.decode('utf-8'))
test_part_prefix.template = '''
{% form %}
{% part form.test_field prefix %}<i class="mdi-communication-email prefix"></i>{% endpart %}
{% endform %}
'''
def test_part_add_control_class(self):
response = self.app.get(self.test_part_add_control_class.url)
self.assertIn('class="orange"', response.body.decode('utf-8'))
test_part_add_control_class.template = '''
{% form %}
{% attr form.test_field 'widget' class append %}orange{% endattr %}
{% endform %}
'''
def test_part_label(self):
response = self.app.get(self.test_part_label.url)
self.assertIn('<label for="id_test_field">My label</label>', response.body.decode('utf-8'))
test_part_label.template = '''
{% form %}
{% part form.test_field label %}<label for="id_test_field">My label</label>{% endpart %}
{% endform %}
'''
def test_part_add_label_class(self):
response = self.app.get(self.test_part_add_label_class.url)
self.assertIn('class="green-text"', response.body.decode('utf-8'))
test_part_add_label_class.template = '''
{% form %}
{% attr form.test_field 'label' class append %}green-text{% endattr %}
{% endform %}
'''
def test_part_help_text(self):
response = self.app.get(self.test_part_help_text.url)
self.assertIn('<div class="help-block">My help</div>', response.body.decode('utf-8'))
test_part_help_text.template = '''
{% form %}
{% part form.test_field help_text %}<div class="help-block">My help</div>{% endpart %}
{% endform %}
'''
def test_part_errors(self):
response = self.app.get(self.test_part_errors.url)
self.assertIn('<div class="errors"><small class="error">My Error</small></div>', response.body.decode('utf-8'))
test_part_errors.template = '''
{% form %}
{% part form.test_field errors%}<div class="errors"><small class="error">My Error</small></div>{% endpart %}
{% endform %}
'''
urlpatterns = build_test_urls(Test)
| {
"content_hash": "4508bdc88a299b74cda7ca606cae4fbb",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 122,
"avg_line_length": 37.32283464566929,
"alnum_prop": 0.6046413502109704,
"repo_name": "viewflow/django-material",
"id": "8e6ab8252930233a8f3b0f2879af7f3389c9298e",
"size": "4740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_widget_select.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "525907"
},
{
"name": "HTML",
"bytes": "51218"
},
{
"name": "JavaScript",
"bytes": "269256"
},
{
"name": "Python",
"bytes": "202324"
},
{
"name": "SCSS",
"bytes": "20762"
}
],
"symlink_target": ""
} |
from .streamer import Streamer
from .router import Router
from .about import (__name__, __version__, __author__, __author_email__,
__description__, __license__, __url__)
read_json = Streamer.read_json
__all__ = ['Router', 'Streamer', 'read_json',
           '__name__', '__version__', '__author__', '__author_email__',
           '__description__', '__license__', '__url__']
__version__ = "0.0.2"
| {
"content_hash": "e10d9ce9c6b656ef726058183457e27a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 72,
"avg_line_length": 36.27272727272727,
"alnum_prop": 0.5513784461152882,
"repo_name": "mediawiki-utilities/python-mwcli",
"id": "3e0b26632872810416ad11de32d7f5be422aba83",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mwcli/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13885"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
"""
# Ordered Player
The ordered player plays the first available move from top to bottom,
from left to right.
"""
from supercat.classes import BasePlayer
from supercat.utils import boxes
class Player(BasePlayer):
name = 'meta'
def play(self, world, game, move_num, last_move):
# lazy game
if game is not None:
# should play this game
for col, row in boxes():
if world[game][row, 2-col] is None:
return game, (row, 2-col)
else:
# free play!
for grand_row, grand_col in boxes():
if world[2-grand_row, grand_col]['owner'] is not None:
continue
for col, row in boxes():
if world[2-grand_row, grand_col][row, 2-col] is None:
return (2-grand_row, grand_col), (row, 2-col)
return None, None
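# Hypothetical usage sketch (assumes the supercat engine supplies the
# world mapping and that boxes() yields (col, row) pairs over the 3x3
# grid, as the loops above expect):
#   player = Player()
#   game, move = player.play(world, game=(0, 0), move_num=1, last_move=None)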
| {
"content_hash": "828be7b44ace78ff9820c2991595c407",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 29.387096774193548,
"alnum_prop": 0.5389681668496158,
"repo_name": "developingo/supercat",
"id": "b0f63c115f9bbcbb2e0223b9a7e889d272403aa6",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supercat/players/meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17426"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from django.template import Context
from django.template.loader import render_to_string, get_template
from django.conf import settings
from django.core.exceptions import ValidationError
from confy import env
import json
from django.utils.safestring import SafeText
from django.utils.crypto import get_random_string
import os
from . import models
from django.core import serializers
import collections
import datetime
"""
This is an upload wrapper for the ajax uploader widget for django forms.
"""
def getPredefinedCondition(request,precond_id):
if request.user.is_staff:
object_hash = models.ConditionPredefined.objects.get(id=precond_id,status=1)
json_hash = serializers.serialize('json', [object_hash])
#listing = buildListingArray(object_hash)
#json_hash = json.dumps(object_hash)
else:
json_hash = json.dumps({"error":"Access Denied"})
return HttpResponse(json_hash, content_type='text/html')
| {
"content_hash": "f4a7611bdcd2fed448d396eba6244a74",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 83,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.7654822335025381,
"repo_name": "parksandwildlife/statdev",
"id": "4b9d07cd3748b3137700f58f41deea530b27573c",
"size": "985",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "applications/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "540"
},
{
"name": "HTML",
"bytes": "522817"
},
{
"name": "JavaScript",
"bytes": "28979"
},
{
"name": "Python",
"bytes": "881972"
}
],
"symlink_target": ""
} |
import sys
import os
import unittest
from mock import patch, MagicMock, ANY, call
import pylacuna.cachedmap as cachedmap
import ast
from sys import version_info
if version_info.major == 2:
import __builtin__ as builtins # pylint:disable=import-error
else:
import builtins # pylint:disable=import-error
def mock_api(route, method, params=None):
if method == 'view':
print "MOCK: Returning individual view"
return None
if method == 'get_buildings':
print "MOCK: Returning get buildings response"
return None
print "MOCK: Returning mock"
return MagicMock()
class testCachedMap(unittest.TestCase):
def setUp(self):
# Create session mock
self.session_mock = MagicMock()
self.session_mock.call_method_with_session_id.side_effect = mock_api
        # Create body mock
self.mock_map = MagicMock()
# Patch out stdout
# use 'reload(sys)' in a test to undo
# patcher = patch('sys.stdout')
# patcher.start()
# self.addCleanup(patcher.stop)
def tearDown(self):
pass
def test_init_and_print(self):
mymap = cachedmap.CachedMap(self.session_mock, self.mock_map)
        print(self.mock_map.mock_calls)
        print(self.session_mock.mock_calls)
        print(mymap)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "7e575e50f7bd9b85395d59fbbec1a890",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 25.754716981132077,
"alnum_prop": 0.6454212454212455,
"repo_name": "miketwo/pylacuna",
"id": "e780dad2fde0121fd2411aa68a4359547ff7b56c",
"size": "1387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylacuna/tests/test_cachedmap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "160215"
},
{
"name": "Shell",
"bytes": "107"
}
],
"symlink_target": ""
} |
import flask
import requests
mailgun_key = None
def set_mailgun_key(mgkey):
global mailgun_key
mailgun_key = mgkey
def send_welcome(addr, link, other_email):
return requests.post(
"https://api.mailgun.net/v2/tokd.wtf/messages",
auth=("api", mailgun_key),
data={
"from": "Chessmaster <chess@tokd.wtf>",
"to": [addr],
"subject": "Your chess game with %s has begun" % other_email,
"text": "Come play chess! Your unique game link (don't share it "
"with others) is %s" % link,
"html": flask.render_template(
"welcome_email.html", game_url=link, other_email=other_email),
})
def send_move_email(addr, link, other_email, last_move):
return requests.post(
"https://api.mailgun.net/v2/tokd.wtf/messages",
auth=("api", mailgun_key),
data={
"from": "Chessmaster <chess@tokd.wtf>",
"to": [addr],
"subject": "Your move against %s" % other_email,
"text": "It's your turn! Your unique game link (in case you "
"forgot it) is %s" % link,
"html": flask.render_template(
"move_email.html",
game_url=link, other_email=other_email, last_move=last_move),
})
| {
"content_hash": "d65619880b1a99ef11819dc942db8387",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 35.945945945945944,
"alnum_prop": 0.5473684210526316,
"repo_name": "haldean/chess",
"id": "02d537a245d783be631e77cbda05e313f28ec026",
"size": "1330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/emails.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32766"
},
{
"name": "JavaScript",
"bytes": "1216495"
},
{
"name": "Makefile",
"bytes": "1574"
},
{
"name": "Python",
"bytes": "80098"
},
{
"name": "Shell",
"bytes": "659"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/base/shared_base_pauldron_both.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","default_pauldrons")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "2131eccc70852734954e2ae91f24715d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 24.923076923076923,
"alnum_prop": 0.7129629629629629,
"repo_name": "obi-two/Rebelion",
"id": "fa0c6ce734b401c04a5055091c96155ae2d84d7b",
"size": "469",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/wearables/base/shared_base_pauldron_both.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from django.core.exceptions import ValidationError
from django.forms import fields as django_fields
from django.utils.translation import ugettext_lazy as _
from metashare.repository import validators
from metashare.repository.editor.widgets import LangDictWidget
class DictField(django_fields.Field):
"""
A form field which represents a Python dictionary.
A `required` `DictField` will be validated to not be empty.
"""
# custom validation error messages
custom_error_messages = {
# pylint: disable-msg=E1102
'duplicate_key': _(u'There may be only one entry with key "{}".'),
}
def __init__(self, max_key_length=None, max_val_length=None, **kwargs):
"""
Initializes a new `DictField`.
The `max_key_length`/`max_val_length` arguments specify the maximum
length of a dictionary entry key/value.
"""
self.max_key_length = max_key_length
self.max_val_length = max_val_length
# we only work with `DictWidget`s
kwargs['widget'] = LangDictWidget(
blank=not kwargs.get('required', True),
max_key_length=self.max_key_length,
max_val_length=self.max_val_length)
# add our custom error messages
updated_error_messages = {}
updated_error_messages.update(DictField.custom_error_messages)
if 'error_messages' in kwargs:
updated_error_messages.update(kwargs['error_messages'])
kwargs['error_messages'] = updated_error_messages
# let our parent do the rest
super(DictField, self).__init__(**kwargs)
def to_python(self, value):
"""
Converts the list of key/value pairs from `DictWidget` to a Python
dictionary, making sure that there is no duplicate key.
"""
if value is None:
return None
result = {}
for key, val in value:
# ensure that there is no duplicate key in the provided pairs
if key in result:
raise ValidationError(self.error_messages['duplicate_key']
.format(key))
result[key] = val
return result
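    # Illustrative example (not from the original source): widget data of
    # [('en', 'Hello'), ('de', 'Hallo')] becomes
    # {'en': 'Hello', 'de': 'Hallo'}, while [('en', 'a'), ('en', 'b')]
    # raises a ValidationError carrying the 'duplicate_key' message.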
class XmlCharField(django_fields.CharField):
"""
A `CharField` which only allows the characters that match the Char
production from XML 1.0 (Second Edition), cf.
http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Char.
"""
default_error_messages = {
# pylint: disable-msg=E1102
'invalid': _(u'The character at position %(char_pos)s '
u'(&#x%(char_code)04x;) must not be used. Enter a string '
u'consisting of only characters that match the Char production '
u'from XML 1.0 (Second Edition), cf. '
u'http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Char.'),
}
default_validators = [validators.validate_matches_xml_char_production]
| {
"content_hash": "ca47006fe0bcdd113471bc51ce91d6fa",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 76,
"avg_line_length": 39.053333333333335,
"alnum_prop": 0.6237623762376238,
"repo_name": "JuliBakagianni/META-SHARE",
"id": "b05dc2faf9adef84c8cf54248b39de1561617a83",
"size": "2929",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "metashare/repository/editor/form_fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7362"
},
{
"name": "C",
"bytes": "321"
},
{
"name": "C++",
"bytes": "112277"
},
{
"name": "CSS",
"bytes": "125097"
},
{
"name": "HTML",
"bytes": "2956138"
},
{
"name": "Java",
"bytes": "12780"
},
{
"name": "JavaScript",
"bytes": "201032"
},
{
"name": "Makefile",
"bytes": "6778"
},
{
"name": "Python",
"bytes": "4084834"
},
{
"name": "Shell",
"bytes": "121386"
},
{
"name": "XSLT",
"bytes": "473763"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/food/crafted/shared_dish_vercupti_of_agazza_boleruuee.iff"
result.attribute_template_id = 5
result.stfName("food_name","vercupti")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "e06e1be2e24f26091b88d5bde3df7b21",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 94,
"avg_line_length": 24.76923076923077,
"alnum_prop": 0.7080745341614907,
"repo_name": "anhstudios/swganh",
"id": "65970063b5a640a7654846e14126350a8f3196dc",
"size": "467",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/food/crafted/shared_dish_vercupti_of_agazza_boleruuee.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import os
import sys
import urllib2
import xbmc
import xbmcgui
import xbmcplugin
import socket
from pulsar.config import PULSARD_HOST
from pulsar.addon import ADDON, ADDON_ID
from pulsar.platform import PLATFORM
try:
import simplejson as json
except ImportError:
import json
HANDLE = int(sys.argv[1])
class closing(object):
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
import urllib
infourl = urllib.addinfourl(fp, headers, headers["Location"])
infourl.status = code
infourl.code = code
return infourl
http_error_300 = http_error_302
http_error_301 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def _json(url):
with closing(urllib2.urlopen(url)) as response:
if response.code >= 300 and response.code <= 307:
item = xbmcgui.ListItem(path=response.geturl(), thumbnailImage=xbmc.getInfoLabel("ListItem.Art(thumb)"))
xbmcplugin.setResolvedUrl(HANDLE, True, item)
return
payload = response.read()
if payload:
return json.loads(payload)
def run(url_suffix=""):
if not os.path.exists(os.path.join(xbmc.translatePath(ADDON.getAddonInfo("path")), ".firstrun")):
from pulsar.util import notify
notify("You must restart XBMC before using Pulsar")
return
socket.setdefaulttimeout(300)
urllib2.install_opener(urllib2.build_opener(NoRedirectHandler()))
url = sys.argv[0].replace("plugin://%s" % ADDON_ID, PULSARD_HOST + url_suffix) + sys.argv[2]
xbmc.log(url)
data = _json(url)
if not data:
return
if data["content_type"]:
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_GENRE)
xbmcplugin.setContent(HANDLE, data["content_type"])
listitems = range(len(data["items"]))
for i, item in enumerate(data["items"]):
listItem = xbmcgui.ListItem(label=item["label"], label2=item["label2"], iconImage=item["icon"], thumbnailImage=item["thumbnail"])
if item.get("info"):
listItem.setInfo("video", item["info"])
if item.get("stream_info"):
for type_, values in item["stream_info"].items():
listItem.addStreamInfo(type_, values)
if item.get("art"):
listItem.setArt(item["art"])
if item.get("context_menu"):
listItem.addContextMenuItems(item["context_menu"])
listItem.setProperty("isPlayable", item["is_playable"] and "true" or "false")
if item.get("properties"):
for k, v in item["properties"].items():
listItem.setProperty(k, v)
listitems[i] = (item["path"], listItem, not item["is_playable"])
xbmcplugin.addDirectoryItems(HANDLE, listitems, totalItems=len(listitems))
xbmcplugin.endOfDirectory(HANDLE, succeeded=True, updateListing=False, cacheToDisc=True)
| {
"content_hash": "4d0e86aec5e684597465869af3b19be2",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 137,
"avg_line_length": 35.61702127659574,
"alnum_prop": 0.6574074074074074,
"repo_name": "steeve/plugin.video.pulsar",
"id": "489e33f8b837c6add94739bb7283fcc19d2c4dad",
"size": "3348",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "resources/site-packages/pulsar/navigation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "967"
},
{
"name": "Python",
"bytes": "157058"
}
],
"symlink_target": ""
} |
"""Encode the data using pre-trained VAE on dataspace.
This script encodes the instances in dataspace (x) from the training set into
distributions in the latent space (z) using the pre-trained models from
`train_dataspace.py`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import os
from magenta.models.latent_transfer import common
from magenta.models.latent_transfer import model_dataspace
import numpy as np
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('config', 'mnist_0',
'The name of the model config to use.')
tf.flags.DEFINE_string('exp_uid', '_exp_0',
'String to append to config for filenames/directories.')
def main(unused_argv):
del unused_argv
# Load Config
config_name = FLAGS.config
config_module = importlib.import_module('configs.%s' % config_name)
config = config_module.config
model_uid = common.get_model_uid(config_name, FLAGS.exp_uid)
batch_size = config['batch_size']
# Load dataset
dataset = common.load_dataset(config)
basepath = dataset.basepath
save_path = dataset.save_path
train_data = dataset.train_data
eval_data = dataset.eval_data
# Make the directory
save_dir = os.path.join(save_path, model_uid)
best_dir = os.path.join(save_dir, 'best')
tf.gfile.MakeDirs(save_dir)
tf.gfile.MakeDirs(best_dir)
tf.logging.info('Save Dir: %s', save_dir)
# Load Model
tf.reset_default_graph()
sess = tf.Session()
m = model_dataspace.Model(config, name=model_uid)
_ = m() # noqa
# Initialize
sess.run(tf.global_variables_initializer())
# Load
m.vae_saver.restore(sess,
os.path.join(best_dir, 'vae_best_%s.ckpt' % model_uid))
# Encode
def encode(data):
"""Encode the data in dataspace to latent spaceself.
This script runs the encoding in batched mode to limit GPU memory usage.
Args:
data: A numpy array of data to be encoded.
Returns:
A object with instances `mu` and `sigma`, the parameters of encoded
distributions in the latent space.
"""
mu_list, sigma_list = [], []
for i in range(0, len(data), batch_size):
start, end = i, min(i + batch_size, len(data))
batch = data[start:end]
mu, sigma = sess.run([m.mu, m.sigma], {m.x: batch})
mu_list.append(mu)
sigma_list.append(sigma)
mu = np.concatenate(mu_list)
sigma = np.concatenate(sigma_list)
return common.ObjectBlob(mu=mu, sigma=sigma)
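  # Batching sketch (illustrative numbers, not from the original): with
  # batch_size=64 and 150 rows, the loop above makes three session calls
  # over slices [0:64], [64:128] and [128:150], then concatenates the
  # per-batch mu/sigma arrays into one result.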
encoded_train_data = encode(train_data)
tf.logging.info(
'encode train_data: mu.shape = %s sigma.shape = %s',
encoded_train_data.mu.shape,
encoded_train_data.sigma.shape,
)
encoded_eval_data = encode(eval_data)
tf.logging.info(
'encode eval_data: mu.shape = %s sigma.shape = %s',
encoded_eval_data.mu.shape,
encoded_eval_data.sigma.shape,
)
# Save encoded as npz file
encoded_save_path = os.path.join(basepath, 'encoded', model_uid)
tf.gfile.MakeDirs(encoded_save_path)
tf.logging.info('encoded train_data saved to %s',
os.path.join(encoded_save_path, 'encoded_train_data.npz'))
np.savez(
os.path.join(encoded_save_path, 'encoded_train_data.npz'),
mu=encoded_train_data.mu,
sigma=encoded_train_data.sigma,
)
tf.logging.info('encoded eval_data saved to %s',
os.path.join(encoded_save_path, 'encoded_eval_data.npz'))
np.savez(
os.path.join(encoded_save_path, 'encoded_eval_data.npz'),
mu=encoded_eval_data.mu,
sigma=encoded_eval_data.sigma,
)
if __name__ == '__main__':
tf.app.run(main)
| {
"content_hash": "7e0da7362320a6331f6f2d382e1e2a83",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 29.023622047244096,
"alnum_prop": 0.6638632664134563,
"repo_name": "adarob/magenta",
"id": "a8c140c40f977a450c64db3846987b1a19f08c28",
"size": "4271",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magenta/models/latent_transfer/encode_dataspace.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1933"
},
{
"name": "Python",
"bytes": "2941402"
},
{
"name": "Shell",
"bytes": "24986"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_log import log as logging
from sahara import conductor
from sahara import context
from sahara.plugins import utils
from sahara.plugins.vanilla import abstractversionhandler as avm
from sahara.plugins.vanilla.hadoop2 import config as c
from sahara.plugins.vanilla.hadoop2 import edp_engine
from sahara.plugins.vanilla.hadoop2 import run_scripts as run
from sahara.plugins.vanilla.hadoop2 import scaling as sc
from sahara.plugins.vanilla.hadoop2 import validation as vl
from sahara.plugins.vanilla import utils as vu
from sahara.plugins.vanilla.v2_4_1 import config_helper as c_helper
from sahara.utils import cluster_progress_ops as cpo
conductor = conductor.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VersionHandler(avm.AbstractVersionHandler):
def __init__(self):
self.pctx = {
'env_confs': c_helper.get_env_configs(),
'all_confs': c_helper.get_plugin_configs()
}
def get_plugin_configs(self):
return self.pctx['all_confs']
def get_node_processes(self):
return {
"Hadoop": [],
"MapReduce": ["historyserver"],
"HDFS": ["namenode", "datanode", "secondarynamenode"],
"YARN": ["resourcemanager", "nodemanager"],
"JobFlow": ["oozie"],
"Hive": ["hiveserver"]
}
def validate(self, cluster):
vl.validate_cluster_creating(self.pctx, cluster)
def update_infra(self, cluster):
pass
def configure_cluster(self, cluster):
c.configure_cluster(self.pctx, cluster)
def start_namenode(self, cluster):
nn = vu.get_namenode(cluster)
self._start_namenode(nn)
@cpo.event_wrapper(
True, step=utils.start_process_event_message('NameNode'))
def _start_namenode(self, nn):
run.format_namenode(nn)
run.start_hadoop_process(nn, 'namenode')
def start_secondarynamenodes(self, cluster):
snns = vu.get_secondarynamenodes(cluster)
if len(snns) == 0:
return
cpo.add_provisioning_step(
snns[0].cluster_id, utils.start_process_event_message(
"SecondaryNameNodes"), len(snns))
for snn in vu.get_secondarynamenodes(cluster):
self._start_secondarynamenode(snn)
@cpo.event_wrapper(True)
def _start_secondarynamenode(self, snn):
run.start_hadoop_process(snn, 'secondarynamenode')
def start_resourcemanager(self, cluster):
rm = vu.get_resourcemanager(cluster)
if rm:
self._start_resourcemanager(rm)
@cpo.event_wrapper(
True, step=utils.start_process_event_message('ResourceManager'))
def _start_resourcemanager(self, snn):
run.start_yarn_process(snn, 'resourcemanager')
def start_historyserver(self, cluster):
hs = vu.get_historyserver(cluster)
if hs:
run.start_historyserver(hs)
def start_oozie(self, cluster):
oo = vu.get_oozie(cluster)
if oo:
run.start_oozie_process(self.pctx, oo)
def start_hiveserver(self, cluster):
hiveserver = vu.get_hiveserver(cluster)
if hiveserver:
run.start_hiveserver_process(self.pctx, hiveserver)
def start_cluster(self, cluster):
self.start_namenode(cluster)
self.start_secondarynamenodes(cluster)
self.start_resourcemanager(cluster)
run.start_dn_nm_processes(utils.get_instances(cluster))
run.await_datanodes(cluster)
self.start_historyserver(cluster)
self.start_oozie(cluster)
self.start_hiveserver(cluster)
self._set_cluster_info(cluster)
def decommission_nodes(self, cluster, instances):
sc.decommission_nodes(self.pctx, cluster, instances)
def validate_scaling(self, cluster, existing, additional):
vl.validate_additional_ng_scaling(cluster, additional)
vl.validate_existing_ng_scaling(self.pctx, cluster, existing)
def scale_cluster(self, cluster, instances):
sc.scale_cluster(self.pctx, cluster, instances)
def _set_cluster_info(self, cluster):
nn = vu.get_namenode(cluster)
rm = vu.get_resourcemanager(cluster)
hs = vu.get_historyserver(cluster)
oo = vu.get_oozie(cluster)
info = {}
if rm:
info['YARN'] = {
'Web UI': 'http://%s:%s' % (rm.management_ip, '8088'),
'ResourceManager': 'http://%s:%s' % (rm.management_ip, '8032')
}
if nn:
info['HDFS'] = {
'Web UI': 'http://%s:%s' % (nn.management_ip, '50070'),
'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
}
if oo:
info['JobFlow'] = {
'Oozie': 'http://%s:%s' % (oo.management_ip, '11000')
}
if hs:
info['MapReduce JobHistory Server'] = {
'Web UI': 'http://%s:%s' % (hs.management_ip, '19888')
}
ctx = context.ctx()
conductor.cluster_update(ctx, cluster, {'info': info})
def get_edp_engine(self, cluster, job_type):
if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
return edp_engine.EdpOozieEngine(cluster)
return None
def get_edp_job_types(self):
return edp_engine.EdpOozieEngine.get_supported_job_types()
def get_edp_config_hints(self, job_type):
return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
def get_open_ports(self, node_group):
return c.get_open_ports(node_group)
| {
"content_hash": "c1b1eaf0b08400cea6145a61f2fbf9ac",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 78,
"avg_line_length": 31.96590909090909,
"alnum_prop": 0.6226448631354425,
"repo_name": "redhat-openstack/sahara",
"id": "d0e186ed4636cd6e83d739df3a7f18c30c202064",
"size": "6209",
"binary": false,
"copies": "3",
"ref": "refs/heads/master-patches",
"path": "sahara/plugins/vanilla/v2_6_0/versionhandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "1528"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "2771576"
},
{
"name": "Shell",
"bytes": "42673"
}
],
"symlink_target": ""
} |
from flask import Flask, jsonify, abort, request
import os
import socket
import logging
import sys
from subprocess32 import check_call, CalledProcessError, call
from werkzeug.exceptions import HTTPException, default_exceptions
from netaddr import IPAddress, IPNetwork
from pycalico.datastore import IF_PREFIX
from pycalico.datastore_errors import DataStoreError
from pycalico.datastore_datatypes import Endpoint
from pycalico.ipam import SequentialAssignment, IPAMClient
FIXED_MAC = "EE:EE:EE:EE:EE:EE"
CONTAINER_NAME = "libnetwork"
ORCHESTRATOR_ID = "docker"
# How long to wait (seconds) for IP commands to complete.
IP_CMD_TIMEOUT = 5
hostname = socket.gethostname()
client = IPAMClient()
# Return all errors as JSON. From http://flask.pocoo.org/snippets/83/
def make_json_app(import_name, **kwargs):
"""
Creates a JSON-oriented Flask app.
All error responses that you don't specifically
manage yourself will have application/json content
type, and will contain JSON like this (just an example):
{ "message": "405: Method Not Allowed" }
"""
def make_json_error(ex):
response = jsonify(message=str(ex))
response.status_code = (ex.code
if isinstance(ex, HTTPException)
else 500)
return response
app = Flask(import_name, **kwargs)
for code in default_exceptions.iterkeys():
app.error_handler_spec[None][code] = make_json_error
return app
app = make_json_app(__name__)
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.INFO)
app.logger.info("Application started")
@app.route('/Plugin.Activate', methods=['POST'])
def activate():
return jsonify({"Implements": ["NetworkDriver"]})
@app.route('/NetworkDriver.CreateNetwork', methods=['POST'])
def create_network():
# force is required since the request doesn't have the correct mimetype
# If the JSON is malformed, then a BadRequest exception is raised,
# which returns a HTTP 400 response.
json_data = request.get_json(force=True)
# Create the "network" as a profile. The network ID is somewhat unwieldy
# so in future we might want to obtain a human readable name for it.
network_id = json_data["NetworkID"]
app.logger.info("Creating profile %s", network_id)
client.create_profile(network_id)
return jsonify({})
@app.route('/NetworkDriver.DeleteNetwork', methods=['POST'])
def delete_network():
json_data = request.get_json(force=True)
# Remove the network. We don't raise an error if the profile is still
# being used by endpoints. We assume libnetwork will enforce this.
# From https://github.com/docker/libnetwork/blob/master/docs/design.md
# LibNetwork will not allow the delete to proceed if there are any
# existing endpoints attached to the Network.
network_id = json_data["NetworkID"]
app.logger.info("Removing profile %s", network_id)
client.remove_profile(network_id)
return jsonify({})
@app.route('/NetworkDriver.CreateEndpoint', methods=['POST'])
def create_endpoint():
json_data = request.get_json(force=True)
ep_id = json_data["EndpointID"]
net_id = json_data["NetworkID"]
# Create a calico endpoint object which we can populate and return to
# libnetwork at the end of this method.
ep = Endpoint(hostname, "docker", CONTAINER_NAME, ep_id, "active",
FIXED_MAC)
ep.profile_ids.append(net_id)
# This method is split into three phases that have side effects.
# 1) Assigning IP addresses
# 2) Creating VETHs
# 3) Writing the endpoint to the datastore.
#
# A failure in a later phase attempts to roll back the effects of
# the earlier phases.
# First up is IP assignment. By default we assign both IPv4 and IPv6
# addresses.
# IPv4 failures may abort the request if the address couldn't be assigned.
ipv4_and_gateway(ep)
# IPv6 is currently best effort and won't abort the request.
ipv6_and_gateway(ep)
# Next, create the veth.
try:
create_veth(ep)
except CalledProcessError as e:
# Failed to create or configure the veth.
# Back out the IP assignments and the veth creation.
app.logger.exception(e)
backout_ip_assignments(ep)
remove_veth(ep)
abort(500)
# Finally, write the endpoint to the datastore.
try:
client.set_endpoint(ep)
except DataStoreError as e:
# We've failed to write the endpoint to the datastore.
# Back out the IP assignments and the veth creation.
app.logger.exception(e)
backout_ip_assignments(ep)
remove_veth(ep)
abort(500)
# Everything worked, create the JSON and return it to libnetwork.
assert len(ep.ipv4_nets) == 1
assert len(ep.ipv6_nets) <= 1
iface_json = {"ID": 0,
"Address": str(list(ep.ipv4_nets)[0]),
"MacAddress": ep.mac}
if ep.ipv6_nets:
iface_json["AddressIPv6"] = str(list(ep.ipv6_nets)[0])
return jsonify({"Interfaces": [iface_json]})
@app.route('/NetworkDriver.DeleteEndpoint', methods=['POST'])
def delete_endpoint():
json_data = request.get_json(force=True)
ep_id = json_data["EndpointID"]
app.logger.info("Removing endpoint %s", ep_id)
# Remove the endpoint from the datastore, the IPs that were assigned to
# it and the veth. Even if one fails, try to do the others.
ep = None
try:
ep = client.get_endpoint(hostname=hostname,
orchestrator_id="docker",
workload_id=CONTAINER_NAME,
endpoint_id=ep_id)
backout_ip_assignments(ep)
except (KeyError, DataStoreError) as e:
app.logger.exception(e)
app.logger.warning("Failed to unassign IPs for endpoint %s", ep_id)
if ep:
try:
client.remove_endpoint(ep)
except DataStoreError as e:
app.logger.exception(e)
app.logger.warning("Failed to remove endpoint %s from datastore",
ep_id)
# libnetwork expects us to delete the veth pair. (Note that we only need
# to delete one end).
if ep:
remove_veth(ep)
return jsonify({})
@app.route('/NetworkDriver.EndpointOperInfo', methods=['POST'])
def endpoint_oper_info():
json_data = request.get_json(force=True)
ep_id = json_data["EndpointID"]
app.logger.info("Endpoint operation info requested for %s", ep_id)
# Nothing is supported yet, just pass blank data.
return jsonify({"Value": {}})
@app.route('/NetworkDriver.Join', methods=['POST'])
def join():
json_data = request.get_json(force=True)
ep_id = json_data["EndpointID"]
app.logger.info("Joining endpoint %s", ep_id)
ep = client.get_endpoint(hostname=hostname,
orchestrator_id="docker",
workload_id=CONTAINER_NAME,
endpoint_id=ep_id)
ret_json = {
"InterfaceNames": [{
"SrcName": ep.temp_interface_name(),
"DstPrefix": IF_PREFIX
}],
"Gateway": str(ep.ipv4_gateway),
"StaticRoutes": [{
"Destination": "%s/32" % ep.ipv4_gateway,
"RouteType": 1, # 1 = CONNECTED
"NextHop": "",
"InterfaceID": 0 # 1st interface created in EndpointCreate
}]
}
if ep.ipv6_gateway:
ret_json["GatewayIPv6"] = str(ep.ipv6_gateway)
ret_json["StaticRoutes"].append({
"Destination": "%s/128" % ep.ipv6_gateway,
"RouteType": 1, # 1 = CONNECTED
"NextHop": "",
"InterfaceID": 0 # 1st interface created in EndpointCreate
})
return jsonify(ret_json)
@app.route('/NetworkDriver.Leave', methods=['POST'])
def leave():
json_data = request.get_json(force=True)
ep_id = json_data["EndpointID"]
app.logger.info("Leaving endpoint %s", ep_id)
# Noop. There's nothing to do.
return jsonify({})
def assign_ip(version):
"""
Assign a IP address from the configured pools.
:param version: "v4" for IPv4, "v6" for IPv6.
:return: An IPAddress, or None if an IP couldn't be
assigned
"""
ip = None
assert version in ["v4", "v6"]
# For each configured pool, attempt to assign an IP before giving up.
for pool in client.get_ip_pools(version):
assigner = SequentialAssignment()
ip = assigner.allocate(pool)
if ip is not None:
ip = IPAddress(ip)
break
return ip
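# Illustrative note (hypothetical pool values): with configured pools
# ["192.168.0.0/24", "10.0.0.0/16"], assign_ip("v4") tries the first
# pool and only falls through to the second when no address can be
# allocated there; None is returned if every pool is exhausted.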
def unassign_ip(ip):
"""
Unassign a IP address from the configured pools.
:param ip: IPAddress to unassign.
:return: True if the unassignment succeeded. False otherwise.
"""
# For each configured pool, attempt to unassign the IP before giving up.
version = "v%d" % ip.version
for pool in client.get_ip_pools(version):
if ip in pool:
if client.unassign_address(pool, ip):
return True
return False
def ipv4_and_gateway(ep):
# Get the gateway before trying to assign an address. This will avoid
# needing to backout the assignment if fetching the gateway fails.
try:
next_hop = client.get_default_next_hops(hostname)[4]
except KeyError as e:
app.logger.exception(e)
abort(500)
ip = assign_ip("v4")
app.logger.info("Assigned IPv4 %s", ip)
if not ip:
app.logger.error("Failed to allocate IPv4 for endpoint %s",
ep.endpoint_id)
abort(500)
ip = IPNetwork(ip)
ep.ipv4_nets.add(ip)
ep.ipv4_gateway = next_hop
def ipv6_and_gateway(ep):
try:
next_hop6 = client.get_default_next_hops(hostname)[6]
except KeyError:
app.logger.info("Couldn't find IPv6 gateway for endpoint %s. "
"Skipping IPv6 assignment.",
ep.endpoint_id)
else:
ip6 = assign_ip("v6")
if ip6:
ip6 = IPNetwork(ip6)
ep.ipv6_gateway = next_hop6
ep.ipv6_nets.add(ip6)
else:
app.logger.info("Failed to allocate IPv6 address for endpoint %s",
ep.endpoint_id)
def backout_ip_assignments(ep):
for net in ep.ipv4_nets.union(ep.ipv6_nets):
# The unassignment is best effort. Just log if it fails.
if not unassign_ip(net.ip):
app.logger.warn("Failed to unassign IP %s", net.ip)
def create_veth(ep):
# Create the veth
check_call(['ip', 'link',
'add', ep.name,
'type', 'veth',
'peer', 'name', ep.temp_interface_name()],
timeout=IP_CMD_TIMEOUT)
# Set the host end of the veth to 'up' so felix notices it.
check_call(['ip', 'link', 'set', ep.name, 'up'],
timeout=IP_CMD_TIMEOUT)
# Set the mac as libnetwork doesn't do this for us.
check_call(['ip', 'link', 'set',
'dev', ep.temp_interface_name(),
'address', FIXED_MAC],
timeout=IP_CMD_TIMEOUT)
def remove_veth(ep):
# The veth removal is best effort. If it fails then just log.
rc = call(['ip', 'link', 'del', ep.name], timeout=IP_CMD_TIMEOUT)
if rc != 0:
app.logger.warn("Failed to delete veth %s", ep.name)
if __name__ == '__main__':
# Used when being invoked by the flask development server
PLUGIN_DIR = "/usr/share/docker/plugins/"
if not os.path.exists(PLUGIN_DIR):
os.makedirs(PLUGIN_DIR)
with open(os.path.join(PLUGIN_DIR, 'calico.spec'), 'w') as f:
f.write("tcp://localhost:5000")
# Turns on better error messages and reloading support.
app.debug = True
app.run()
| {
"content_hash": "e268354188974a8ab40170499c7e9bd9",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 78,
"avg_line_length": 32.41803278688525,
"alnum_prop": 0.6203961230509903,
"repo_name": "L-MA/calico-docker",
"id": "5c5dfedaf5a0e95238e8d810243aaeac95dbf115",
"size": "12449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "calico_containers/docker_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4549"
},
{
"name": "Python",
"bytes": "294215"
},
{
"name": "Shell",
"bytes": "4849"
}
],
"symlink_target": ""
} |
import json
import datetime
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.http import HttpResponse
from dashboard.handler import Handler
from libs.decorator.decorator import render_json
from django.views.decorators.csrf import csrf_exempt
import os
# Create your views here.
def test_current_datetime(request):
now = datetime.datetime.now()
html = "<html><body><h2>Welcome to Xspider! It's Worked! </h2>It is now %s.</body></html>" % (now,)
return HttpResponse(html)
def index(request):
"""
Dashboard Index Page
:param request:
:return:
"""
handler = Handler()
projects = handler.query_projects_status_by_redis()
for project in projects:
if project['total'] >= 0 and project['total'] != project['new']:
_task_num = float(project['total'] - project['new'])
project['success_rate'] = round(100 * project['success'] / _task_num, 2)
project['failed_rate'] = round(100 * project['failed'] / _task_num, 2)
project['invalid_rate'] = round(100 * project['invalid'] / _task_num, 2)
project['schedule'] = round((_task_num / project['total']) * 100, 2)
else:
            project['success_rate'] = 0
project['failed_rate'] = 0
project['invalid_rate'] = 0
project['schedule'] = 0
if project['total_d'] >= 0 and project['total_d'] != project['new_d']:
_task_num = float(project['total_d'] - project['new_d'])
project['success_rate_d'] = round(100 * project['success_d'] / _task_num, 2)
project['failed_rate_d'] = round(100 * project['failed_d'] / _task_num, 2)
project['invalid_rate_d'] = round(100 * project['invalid_d'] / _task_num, 2)
project['schedule_d'] = round((_task_num / project['total_d']) * 100, 2)
else:
            project['success_rate_d'] = 0
project['failed_rate_d'] = 0
project['invalid_rate_d'] = 0
project['schedule_d'] = 0
if project['total_h'] >= 0 and project['total_h'] != project['new_h']:
_task_num = float(project['total_h'] - project['new_h'])
project['success_rate_h'] = round(100 * project['success_h'] / _task_num, 2)
project['failed_rate_h'] = round(100 * project['failed_h'] / _task_num, 2)
project['invalid_rate_h'] = round(100 * project['invalid_h'] / _task_num, 2)
project['schedule_h'] = round((_task_num / project['total_h']) * 100, 2)
else:
            project['success_rate_h'] = 0
project['failed_rate_h'] = 0
project['invalid_rate_h'] = 0
project['schedule_h'] = 0
if project['total_m'] >= 0 and project['total_m'] != project['new_m']:
_task_num = float(project['total_m'] - project['new_m'])
project['success_rate_m'] = round(100 * project['success_m'] / _task_num, 2)
project['failed_rate_m'] = round(100 * project['failed_m'] / _task_num, 2)
project['invalid_rate_m'] = round(100 * project['invalid_m'] / _task_num, 2)
project['schedule_m'] = round((_task_num / project['total_m']) * 100, 2)
else:
            project['success_rate_m'] = 0
project['failed_rate_m'] = 0
project['invalid_rate_m'] = 0
project['schedule_m'] = 0
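    # Rate arithmetic sketch (illustrative numbers): with total=100,
    # new=20 and success=60, the processed count is 80, so
    # success_rate == 75.0 and schedule == 80.0 (percent of the queue
    # already consumed).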
return render_to_response("index.html", {'projects': projects, 'tasks': None, 'profile': None,})
@csrf_exempt
@render_json
def debug(request, name):
"""
Debug index page
:param request:
:return:
"""
handler = Handler()
projects = handler.query_projects_status_by_redis(name=name)
project = projects[0]
return render_to_response("debug.html", {'project': project})
@csrf_exempt
@render_json
def data(request):
"""
Data index page
:param request:
:return:
"""
handler = Handler()
projects = handler.query_projects_status_by_redis()
return render_to_response("data.html", {"projects": projects})
@csrf_exempt
@render_json
def result(request, name):
"""
Result detail index page
:param request:
:return:
"""
if name is not None:
handler = Handler()
projects = handler.query_projects_status_by_redis(name=name)
return render_to_response("result.html", {"project": projects[0]})
else:
return
# todo
@csrf_exempt
@render_json
def task(request, name):
"""
Task index page
:param request:
:return:
"""
if name is not None:
handler = Handler()
projects = handler.query_projects_status_by_redis(name=name)
return render_to_response("task.html", {"project": projects[0]})
else:
return
#Todo
@csrf_exempt
@render_json
def log(request, name, task_id):
"""
Task index page
:param request:
:return:
"""
if name is not None:
handler = Handler()
task = handler.query_task_by_task_id(name=name, task_id=task_id)
        print(task)
return render_to_response("log.html", {"task": task["task"], 'log': task})
else:
return
#Todo
@csrf_exempt
@render_json
def nodes(request):
"""
API index page
:param request:
:return:
"""
handler = Handler()
nodes = handler.query_nodes_in_redis(node='--all')
# return nodes
return render_to_response("nodes.html", {'nodes': nodes})
@csrf_exempt
@render_json
def node(request, name):
"""
API index page
:param request:
:return:
"""
# name = request.GET.get('node')
# print 'node: ', name
handler = Handler()
nodes = handler.query_nodes_in_redis(node=name)
return nodes
# return render_to_response("nodes.html", {'nodes': nodes})
@csrf_exempt
@render_json
def api(request):
"""
API index page
:param request:
:return:
"""
return render_to_response("api.html")
@csrf_exempt
@render_json
def edit_project(request):
"""
Edit Project Command API
:param request:
:return:
"""
data = request.POST
command = data.get("command")
group = data.get("group")
name = data.get("project")
timeout = data.get("timeout")
status = data.get("status")
priority = data.get("priority")
info = data.get("info")
script = data.get("script")
interval = data.get("interval")
number = data.get("number")
ip_limit = data.get("ip_limit")
if command and name and (
group or timeout or status or priority or info or script or interval or number or ip_limit):
handler = Handler()
result = handler.edit_project_settings(data)
else:
result = {
"status": False,
"project": name,
"message": "Bad Parameters",
"code": 4001,
}
return result
@csrf_exempt
@render_json
def create_project(request):
"""
Create Project Command API
:param request:
:return:
"""
name = request.POST.get("project")
command = request.POST.get("command")
url = request.POST.get("url")
if name and command:
handler = Handler()
if url is not None:
url = url.strip()
result = handler.create_project(name, url)
else:
result = handler.create_project(name)
else:
result = {
"status": False,
"project": name,
"message": "Bad Parameters",
"code": 4001,
}
return result
@csrf_exempt
@render_json
def run_project(request):
"""
    Run Project Generator Command API
:param request:
:return:
"""
name = request.POST.get("project")
command = request.POST.get("command")
task = request.POST.get("task")
if name and command and task:
handler = Handler()
result = handler.run_processor(name, json.loads(task))
elif name and command:
handler = Handler()
result = handler.run_generator(name)
else:
result = {
"status": False,
"project": name,
"message": "Bad Parameters",
"code": 4001,
}
return result
@csrf_exempt
@render_json
def result_project(request):
"""
    Query Project Result Command API
:param request:
:return:
"""
name = request.GET.get("project")
page = int(request.GET.get("page", '1'))
rows = int(request.GET.get("rows", '10'))
if name and page > 0 and rows > 0:
handler = Handler()
_result = handler.query_result_by_name(name, page, rows)
result = {
"status": True,
"project": name,
"result": _result,
"message":"Query Result Succeed!",
"code": 2001,
}
else:
result = {
"status": False,
"project": name,
"message": "Bad Parameters",
"code": 4001,
}
return result
@csrf_exempt
@render_json
def task_project(request):
"""
    Query Project Result Command API
:param request:
:return:
"""
name = request.GET.get("project")
page = int(request.GET.get("page", '1'))
rows = int(request.GET.get("rows", '10'))
if name and page > 0 and rows > 0:
handler = Handler()
_result = handler.query_task_by_name(name, page, rows)
result = {
"status": True,
"project": name,
"result": _result,
"message":"Query Result Succeed!",
"code": 2001,
}
else:
result = {
"status": False,
"project": name,
"message": "Bad Parameters",
"code": 4001,
}
return result
def test(request):
"""
Dashboard Index Page
:param request:
:return:
"""
return render_to_response("dashboard.html", {'jobs': None, 'tasks': None, 'profile': None}) | {
"content_hash": "4220f450926252d5db65d4ba94eca035",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 103,
"avg_line_length": 26.419098143236074,
"alnum_prop": 0.5606425702811245,
"repo_name": "zym1115718204/xspider",
"id": "d4b020b5ee0335ff104cfc2e2f7d4c1afcaf7d2b",
"size": "10009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xspider/dashboard/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "812460"
},
{
"name": "HTML",
"bytes": "705330"
},
{
"name": "JavaScript",
"bytes": "5867"
},
{
"name": "PHP",
"bytes": "2199"
},
{
"name": "Python",
"bytes": "14618343"
},
{
"name": "Shell",
"bytes": "2176"
}
],
"symlink_target": ""
} |
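A minimal sketch of driving the edit_project endpoint defined in the views above, assuming the view is routed at /edit_project on a local development server (the URL conf is not part of this file, so the path and host are assumptions):
import requests
# Hypothetical host and route; adjust to the project's actual urls.py.
BASE = "http://127.0.0.1:8000"
# edit_project requires "command", "project", and at least one editable
# setting; anything less is answered with {"status": False, "code": 4001}.
resp = requests.post(BASE + "/edit_project", data={
    "command": "edit",
    "project": "demo_spider",
    "status": "running",
    "priority": "5",
})
print(resp.json())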
from task import Task
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, failure, returnValue
from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
from voltha.extensions.omci.omci_me import MEFrame
from voltha.extensions.omci.omci_frame import OmciFrame
from voltha.extensions.omci.omci_messages import OmciCreate, OmciSet, OmciDelete
from voltha.extensions.omci.omci_entities import EntityClass
RC = ReasonCodes
OP = EntityOperations
class ModifyException(Exception):
pass
class OmciModifyRequest(Task):
"""
OpenOMCI Generic Create, Set, or Delete Frame support Task.
    This task allows an ONU to send a Create, Set, or Delete request from any point in its
code while properly using the OMCI-CC channel. Direct access to the OMCI-CC object
to send requests by an ONU is highly discouraged.
"""
task_priority = 128
name = "ONU OMCI Modify Task"
def __init__(self, omci_agent, device_id, frame, priority=task_priority, exclusive=False):
"""
Class initialization
:param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
:param device_id: (str) ONU Device ID
:param frame: (OmciFrame) Frame to send
:param priority: (int) OpenOMCI Task priority (0..255) 255 is the highest
        :param exclusive: (bool) True if this request Task exclusively owns the
                          OMCI-CC while running. Default: False
"""
super(OmciModifyRequest, self).__init__(OmciModifyRequest.name,
omci_agent,
device_id,
priority=priority,
exclusive=exclusive)
self._device = omci_agent.get_device(device_id)
self._frame = frame
self._results = None
self._local_deferred = None
# Validate message type
self._msg_type = frame.fields['message_type']
if self._msg_type not in (OmciCreate.message_id, OmciSet.message_id, OmciDelete.message_id):
raise TypeError('Invalid Message type: {}, must be Create, Set, or Delete'.
format(self._msg_type))
def cancel_deferred(self):
super(OmciModifyRequest, self).cancel_deferred()
d, self._local_deferred = self._local_deferred, None
try:
if d is not None and not d.called:
d.cancel()
except:
pass
@property
def success_code(self):
"""
        Return the OMCI success/reason code from the response.
"""
if self._results is None:
return None
return self._results.fields['omci_message'].fields['success_code']
@property
def illegal_attributes_mask(self):
"""
For Create & Set requests, a failure may indicate that one or more
attributes have an illegal value. This property returns any illegal
attributes
:return: None if not a create/set request, otherwise the attribute mask
of illegal attributes
"""
if self._results is None:
return None
omci_msg = self._results.fields['omci_message'].fields
if self._msg_type == OmciCreate.message_id:
            if self.success_code != RC.ParameterError.value:
return 0
return omci_msg['parameter_error_attributes_mask']
elif self._msg_type == OmciSet.message_id:
            if self.success_code != RC.AttributeFailure.value:
return 0
return omci_msg['failed_attributes_mask']
return None
@property
def unsupported_attributes_mask(self):
"""
For Set requests, a failure may indicate that one or more attributes
        are not supported by this ONU. This property returns any such unsupported attributes.
        :return: None if not a set request, otherwise the attribute mask of any
                 unsupported attributes
"""
if self._msg_type != OmciSet.message_id or self._results is None:
return None
        if self.success_code != RC.AttributeFailure.value:
return 0
return self._results.fields['omci_message'].fields['unsupported_attributes_mask']
@property
def raw_results(self):
"""
Return the raw Response OMCIFrame
"""
return self._results
def start(self):
"""
        Start the OMCI modify request task
"""
super(OmciModifyRequest, self).start()
self._local_deferred = reactor.callLater(0, self.perform_omci)
@inlineCallbacks
def perform_omci(self):
"""
Perform the request
"""
self.log.debug('perform-request')
try:
self.strobe_watchdog()
self._results = yield self._device.omci_cc.send(self._frame)
status = self._results.fields['omci_message'].fields['success_code']
self.log.debug('response-status', status=status)
# Success?
            if status in (RC.Success.value, RC.InstanceExists.value):
self.deferred.callback(self)
else:
raise ModifyException('Failed with status {}'.format(status))
except Exception as e:
self.log.exception('perform-modify', e=e)
self.deferred.errback(failure.Failure(e))
| {
"content_hash": "8148d27d6dcf701ecbea7ccf7ab373b8",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 100,
"avg_line_length": 35.12820512820513,
"alnum_prop": 0.6051094890510949,
"repo_name": "opencord/voltha",
"id": "da7bff5b5a6ca36b0522f987bc8baa2cec218d18",
"size": "6080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voltha/extensions/omci/tasks/omci_modify_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "30265"
},
{
"name": "Dockerfile",
"bytes": "2881"
},
{
"name": "Go",
"bytes": "181529"
},
{
"name": "Jinja",
"bytes": "25855"
},
{
"name": "Makefile",
"bytes": "76329"
},
{
"name": "Python",
"bytes": "9758796"
},
{
"name": "RobotFramework",
"bytes": "10188"
},
{
"name": "Ruby",
"bytes": "1126"
},
{
"name": "Shell",
"bytes": "758475"
},
{
"name": "XSLT",
"bytes": "175917"
}
],
"symlink_target": ""
} |
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.service import api
import sahara.service.validations.base as b
NODE_GROUP_TEMPLATE_SCHEMA = {
"type": "object",
"properties": {
"name": {
"type": "string",
"minLength": 1,
"maxLength": 50,
"format": "valid_name_hostname",
},
"flavor_id": {
'type': 'flavor',
},
"plugin_name": {
"type": "string",
},
"hadoop_version": {
"type": "string",
},
"node_processes": {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1
},
"image_id": {
"type": "string",
"format": "uuid",
},
"node_configs": {
"type": "configs",
},
"volumes_per_node": {
"type": "integer",
"minimum": 0,
},
"volumes_size": {
"type": "integer",
"minimum": 1,
},
"volume_mount_prefix": {
"type": "string",
"format": "posix_path",
},
"description": {
"type": "string",
},
"floating_ip_pool": {
"type": "string",
},
"security_groups": {
"type": "array",
"items": {
"type": "string",
},
},
"auto_security_group": {
"type": "boolean"
},
},
"additionalProperties": False,
"required": [
"name",
"flavor_id",
"plugin_name",
"hadoop_version",
"node_processes",
]
}
def check_node_group_template_create(data, **kwargs):
b.check_node_group_template_unique_name(data['name'])
b.check_plugin_name_exists(data['plugin_name'])
b.check_plugin_supports_version(data['plugin_name'],
data['hadoop_version'])
b.check_node_group_basic_fields(data['plugin_name'],
data['hadoop_version'], data)
def check_node_group_template_usage(node_group_template_id, **kwargs):
cluster_users = []
template_users = []
for cluster in api.get_clusters():
if (node_group_template_id in
[node_group.node_group_template_id
for node_group in cluster.node_groups]):
cluster_users += [cluster.name]
for cluster_template in api.get_cluster_templates():
if (node_group_template_id in
[node_group.node_group_template_id
for node_group in cluster_template.node_groups]):
template_users += [cluster_template.name]
if cluster_users or template_users:
raise ex.InvalidException(
_("Node group template %(template)s is in use by "
"cluster templates: %(users)s; and clusters: %(clusters)s") %
{'template': node_group_template_id,
'users': template_users and ', '.join(template_users) or 'N/A',
'clusters': cluster_users and ', '.join(cluster_users) or 'N/A'})
| {
"content_hash": "f0ed0e2d57d9d632cfb9843650d6d10d",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 29.009174311926607,
"alnum_prop": 0.48070841239721696,
"repo_name": "mapr/sahara",
"id": "4ec89629f0dfe7fc6531be49555c9c80ac9c52f7",
"size": "3745",
"binary": false,
"copies": "2",
"ref": "refs/heads/juno-release",
"path": "sahara/service/validations/node_group_templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "PigLatin",
"bytes": "161"
},
{
"name": "Python",
"bytes": "2112549"
},
{
"name": "Shell",
"bytes": "35267"
}
],
"symlink_target": ""
} |
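For reference, a request body shaped like the dict below passes NODE_GROUP_TEMPLATE_SCHEMA above; every concrete value is illustrative, and the custom "flavor" and "configs" types are checked by format validators registered elsewhere in sahara, not by this schema alone:
# Illustrative payload: the five required keys plus a few optional ones.
node_group_template = {
    "name": "worker-template",                      # valid_name_hostname
    "flavor_id": "42",
    "plugin_name": "vanilla",
    "hadoop_version": "2.7.1",
    "node_processes": ["datanode", "nodemanager"],  # minItems: 1
    "volumes_per_node": 2,                          # optional, minimum 0
    "volumes_size": 100,                            # optional, minimum 1
    "auto_security_group": True,                    # optional boolean
}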
import gevent
from locust import HttpUser, task
from locust.env import Environment
from locust.stats import stats_printer, stats_history
from locust.log import setup_logging
setup_logging("INFO", None)
class MyUser(HttpUser):
host = "https://docs.locust.io"
@task
def t(self):
self.client.get("/")
# setup Environment and Runner
env = Environment(user_classes=[MyUser])
runner = env.create_local_runner()
# start a WebUI instance
web_ui = env.create_web_ui("127.0.0.1", 8089)
# execute init event handlers (only really needed if you have registered any)
env.events.init.fire(environment=env, runner=runner, web_ui=web_ui)
# start a greenlet that periodically outputs the current stats
gevent.spawn(stats_printer(env.stats))
# start a greenlet that save current stats to history
gevent.spawn(stats_history, env.runner)
# start the test
runner.start(1, spawn_rate=10)
# in 60 seconds stop the runner
gevent.spawn_later(60, lambda: runner.quit())
# wait for the greenlets
runner.greenlet.join()
# stop the web server for good measures
web_ui.stop()
| {
"content_hash": "9867639d7e866a798c14f24840359fa7",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 24.545454545454547,
"alnum_prop": 0.7444444444444445,
"repo_name": "locustio/locust",
"id": "5d74d1a69a0585f28480a767094055651b080f58",
"size": "1080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/use_as_lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "733"
},
{
"name": "HTML",
"bytes": "33145"
},
{
"name": "JavaScript",
"bytes": "17309"
},
{
"name": "Makefile",
"bytes": "436"
},
{
"name": "Python",
"bytes": "914443"
},
{
"name": "Sass",
"bytes": "10379"
},
{
"name": "Shell",
"bytes": "3452"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from .views import notification_list, notification_detail
urlpatterns = patterns(
'',
url(r'^$', notification_list,
name='api-notification-list'),
url(r'^(?P<pk>[0-9]+)/$', notification_detail,
name='api-notification-detail'),
)
| {
"content_hash": "f828489c9cff2083ff56112f4487721a",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 30.1,
"alnum_prop": 0.6578073089700996,
"repo_name": "donkawechico/arguman.org",
"id": "fefa7373f867a3a2f78dd5113ec881b47b4a1c61",
"size": "301",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "web/api/v1/notifications/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43733"
},
{
"name": "HTML",
"bytes": "57105"
},
{
"name": "JavaScript",
"bytes": "23645"
},
{
"name": "Python",
"bytes": "166099"
},
{
"name": "Shell",
"bytes": "392"
}
],
"symlink_target": ""
} |
"""
Sample code for sampling a multivariate Gaussian using emcee.
"""
from __future__ import print_function
import numpy as np
import emcee
# First, define the probability distribution that you would like to sample.
def lnprob(x, mu, icov):
diff = x-mu
return -np.dot(diff,np.dot(icov,diff))/2.0
# We'll sample a 50-dimensional Gaussian...
ndim = 50
# ...with randomly chosen mean position...
means = np.random.rand(ndim)
# ...and a positive definite, non-trivial covariance matrix.
cov = 0.5-np.random.rand(ndim**2).reshape((ndim, ndim))
cov = np.triu(cov)
cov += cov.T - np.diag(cov.diagonal())
cov = np.dot(cov,cov)
# Invert the covariance matrix first.
icov = np.linalg.inv(cov)
# We'll sample with 250 walkers.
nwalkers = 250
# Choose an initial set of positions for the walkers.
p0 = [np.random.rand(ndim) for i in xrange(nwalkers)]
# Initialize the sampler with the chosen specs.
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[means, icov])
# Run 100 steps as a burn-in.
pos, prob, state = sampler.run_mcmc(p0, 100)
# Reset the chain to remove the burn-in samples.
sampler.reset()
# Starting from the final position in the burn-in chain, sample for 1000
# steps.
sampler.run_mcmc(pos, 1000, rstate0=state)
# Print out the mean acceptance fraction. In general, acceptance_fraction
# has an entry for each walker so, in this case, it is a 250-dimensional
# vector.
print("Mean acceptance fraction:", np.mean(sampler.acceptance_fraction))
# Estimate the integrated autocorrelation time for the time series in each
# parameter.
print("Autocorrelation time:", sampler.get_autocorr_time())
# Finally, you can plot the projected histograms of the samples using
# matplotlib as follows (as long as you have it installed).
try:
import matplotlib.pyplot as pl
except ImportError:
print("Try installing matplotlib to generate some sweet plots...")
else:
pl.hist(sampler.flatchain[:,0], 100)
pl.show()
| {
"content_hash": "fffb067c2dc9d13d8c97e75c80d8f6d6",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 75,
"avg_line_length": 30.515625,
"alnum_prop": 0.7306707629288275,
"repo_name": "drphilmarshall/emcee",
"id": "337fdcae5ac4f8400b9e2904b6e00fb8c7a1f88a",
"size": "1975",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "examples/quickstart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127346"
},
{
"name": "TeX",
"bytes": "107733"
}
],
"symlink_target": ""
} |
class Error(Exception):
pass
class AuthError(Error):
pass
class RepoNotFoundError(Error):
pass
class UninitializedBuildError(Error):
pass
| {
"content_hash": "665314382177ee5df7cf468c9ac95c39",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 37,
"avg_line_length": 14.272727272727273,
"alnum_prop": 0.7261146496815286,
"repo_name": "percy/python-percy-client",
"id": "575ef059d4ce72d383cf10fe49eebb4b66a6a83c",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "percy/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2087"
},
{
"name": "Python",
"bytes": "69918"
}
],
"symlink_target": ""
} |
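The point of this small hierarchy is that every library-specific failure derives from Error, so callers can catch them all with one except clause; a minimal sketch, with the import path following this file's location at percy/errors.py:
from percy import errors
try:
    # Any of the concrete exceptions may be raised by client code...
    raise errors.AuthError("missing or invalid auth credentials")
except errors.Error as exc:
    # ...and all of them are caught via the shared base class.
    print("percy operation failed: %s" % exc)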
"""Implementation of JSONEncoder
"""
import re
c_encode_basestring_ascii = None
c_encode_basestring = None
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(b'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
#ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
INFINITY = float('inf')
def py_encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
encode_basestring = (c_encode_basestring or py_encode_basestring)
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u{0:04x}'.format(n)
#return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return str('"' + ESCAPE_ASCII.sub(replace, s) + '"')
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str | string |
+-------------------+---------------+
| int, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming non-ASCII characters escaped. If
ensure_ascii is false, the output can contain non-ASCII characters.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be an (item_separator, key_separator)
tuple. The default is (', ', ': ') if *indent* is ``None`` and
(',', ': ') otherwise. To get the most compact JSON representation,
you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
elif indent is not None:
self.item_separator = ','
if default is not None:
self.default = default
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return JSONEncoder.default(self, o)
"""
raise TypeError("Object of type '%s' is not JSON serializable" %
o.__class__.__name__)
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from json.encoder import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
def floatstr(o, allow_nan=self.allow_nan,
_repr=float.__repr__, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
if (_one_shot and c_make_encoder is not None
and self.indent is None):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
ValueError=ValueError,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
str=str,
tuple=tuple,
# XXX for python2 json compatibility: support int and long
# _intstr=int.__str__
_intstr=str
):
if _indent is not None and not isinstance(_indent, str):
_indent = ' ' * _indent
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + _indent * _current_indent_level
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
# XXX for python2 json compatibility: python2 has long, python3 does not
elif isinstance(value, (int, long)):
# Subclasses of int/float may override __str__, but we still
# want to encode them as integers/floats in JSON. One example
# within the standard library is IntEnum.
yield buf + _intstr(value)
elif isinstance(value, float):
# see comment above for int
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for xx in chunks:
yield xx
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + _indent * _current_indent_level
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + _indent * _current_indent_level
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = sorted(dct.items(), key=lambda kv: kv[0])
else:
items = dct.items()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
# see comment for int/float in _make_iterencode
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
# XXX for python2 json compatibility: python2 has long, python3 does not
elif isinstance(key, (int, long)):
# see comment for int/float in _make_iterencode
key = _intstr(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
# XXX for python2 json compatibility: python2 has long, python3 does not
elif isinstance(value, (int, long)):
# see comment for int/float in _make_iterencode
yield _intstr(value)
elif isinstance(value, float):
# see comment for int/float in _make_iterencode
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for xx in chunks:
yield xx
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + _indent * _current_indent_level
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
# XXX for python2 json compatibility: python2 has long, python3 does not
elif isinstance(o, (int, long)):
# see comment for int/float in _make_iterencode
yield _intstr(o)
elif isinstance(o, float):
# see comment for int/float in _make_iterencode
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for xx in _iterencode_list(o, _current_indent_level):
yield xx
elif isinstance(o, dict):
for xx in _iterencode_dict(o, _current_indent_level):
yield xx
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for xx in _iterencode(o, _current_indent_level):
yield xx
if markers is not None:
del markers[markerid]
return _iterencode
| {
"content_hash": "83610b8b730b0e98293a913f5402da30",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 84,
"avg_line_length": 36.86711711711712,
"alnum_prop": 0.5270938970004276,
"repo_name": "sejust/pykit",
"id": "6389cf7d61f1a5c3b35a3f981174212f5d4e64ce",
"size": "16369",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "p3json/encoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "142"
},
{
"name": "Python",
"bytes": "1193736"
},
{
"name": "Shell",
"bytes": "45490"
}
],
"symlink_target": ""
} |
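A short demonstration of the separators/indent behavior documented in the __init__ docstring above; a sketch assuming the module is importable as p3json.encoder (per the repo path) and run under Python 2, which the basestring/long checks target:
from p3json.encoder import JSONEncoder
data = {"b": [1, 2.5, None], "a": True}
# Most compact form: (',', ':') drops all optional whitespace.
print(JSONEncoder(separators=(',', ':'), sort_keys=True).encode(data))
# -> {"a":true,"b":[1,2.5,null]}
# indent=2 pretty-prints and implicitly switches item_separator to ','.
print(JSONEncoder(indent=2, sort_keys=True).encode(data))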
import os
import unittest
import numpy
import dadi
class ResultsTestCase(unittest.TestCase):
def test_1d_ic(self):
# This just the standard neutral model
func_ex = dadi.Numerics.make_extrap_log_func(dadi.Demographics1D.snm)
fs = func_ex([], (17,), [100,120,140])
answer = dadi.Spectrum(1./numpy.arange(18))
self.assert_(numpy.ma.allclose(fs, answer, atol=1e-3))
def test_1d_stationary(self):
func_ex = dadi.Numerics.\
make_extrap_log_func(dadi.Demographics1D.two_epoch)
# We let a two-epoch model equilibrate for tau=10, which should
# eliminate almost all traces of the size change.
fs = func_ex((0.5,10), (17,), [40,50,60])
answer = dadi.Spectrum(0.5/numpy.arange(18))
self.assert_(numpy.ma.allclose(fs, answer, atol=1e-2))
def test_IM(self):
func_ex = dadi.Numerics.\
make_extrap_log_func(dadi.Demographics2D.IM)
params = (0.8, 2.0, 0.6, 0.45, 5.0, 0.3)
ns = (7,13)
pts_l = [40,50,60]
theta = 1000.
fs = theta*func_ex(params, ns, pts_l)
#mscore = dadi.Demographics2D.IM_mscore(params)
#mscommand = dadi.Misc.ms_command(1,ns,mscore,1e6)
#msfs = theta*dadi.Spectrum.from_ms_file(os.popen(mscommand))
#msfs.to_file('IM.fs')
msfs = dadi.Spectrum.from_file('IM.fs')
resid = dadi.Inference.Anscombe_Poisson_residual(fs,msfs)
self.assert_(abs(resid).max() < 0.2)
suite = unittest.TestLoader().loadTestsFromTestCase(ResultsTestCase)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "2a73183e038d52110a08e5efbc01ee85",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 33.791666666666664,
"alnum_prop": 0.6097410604192355,
"repo_name": "niuhuifei/dadi",
"id": "d766f002302a32eda8cad61e5ca2243f9ef58da3",
"size": "1622",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/test_Results.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "23285"
},
{
"name": "Forth",
"bytes": "11731"
},
{
"name": "Python",
"bytes": "316739"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from future.builtins import str, super
import os
import datetime
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage
from django.db.models.fields import Field
from django.db.models.fields.files import FileDescriptor
from django.forms.widgets import Input
from django.template.loader import render_to_string
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from filebrowser_safe.settings import *
from filebrowser_safe.base import FieldFileObject
from filebrowser_safe.functions import get_directory
class FileBrowseWidget(Input):
input_type = 'text'
class Media:
js = (os.path.join(URL_FILEBROWSER_MEDIA, 'js/AddFileBrowser.js'), )
def __init__(self, attrs=None):
self.directory = attrs.get('directory', '')
self.extensions = attrs.get('extensions', '')
self.format = attrs.get('format', '')
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def render(self, name, value, attrs=None):
if value is None:
value = ""
directory = self.directory
if self.directory:
if callable(self.directory):
directory = self.directory()
directory = os.path.normpath(datetime.datetime.now().strftime(directory))
fullpath = os.path.join(get_directory(), directory)
if not default_storage.isdir(fullpath):
default_storage.makedirs(fullpath)
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
final_attrs['search_icon'] = URL_FILEBROWSER_MEDIA + 'img/filebrowser_icon_show.gif'
final_attrs['directory'] = directory
final_attrs['extensions'] = self.extensions
final_attrs['format'] = self.format
final_attrs['DEBUG'] = DEBUG
return render_to_string("filebrowser/custom_field.html", dict(locals(), MEDIA_URL=MEDIA_URL))
class FileBrowseFormField(forms.CharField):
widget = FileBrowseWidget
default_error_messages = {
'extension': _(u'Extension %(ext)s is not allowed. Only %(allowed)s is allowed.'),
}
def __init__(self, max_length=None, min_length=None,
directory=None, extensions=None, format=None,
*args, **kwargs):
self.max_length, self.min_length = max_length, min_length
self.directory = directory
self.extensions = extensions
if format:
self.format = format or ''
self.extensions = extensions or EXTENSIONS.get(format)
super(FileBrowseFormField, self).__init__(*args, **kwargs)
def clean(self, value):
value = super(FileBrowseFormField, self).clean(value)
if value == '':
return value
file_extension = os.path.splitext(value)[1].lower().split("?")[0]
        if self.extensions and file_extension not in self.extensions:
raise forms.ValidationError(self.error_messages['extension'] % {'ext': file_extension, 'allowed': ", ".join(self.extensions)})
return value
class FileBrowseField(Field):
# These attributes control how the field is accessed on a model instance.
# Due to contribute_to_class, FileDescriptor will cause this field to be
# represented by a FileFieldObject on model instances.
# Adapted from django.db.models.fields.files.FileField.
attr_class = FieldFileObject
descriptor_class = FileDescriptor
def __init__(self, *args, **kwargs):
self.directory = kwargs.pop('directory', '')
self.extensions = kwargs.pop('extensions', '')
self.format = kwargs.pop('format', '')
self.storage = kwargs.pop('storage', default_storage)
super(FileBrowseField, self).__init__(*args, **kwargs)
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
return smart_str(value)
def get_manipulator_field_objs(self):
return [oldforms.TextField]
def get_internal_type(self):
return "CharField"
def formfield(self, **kwargs):
attrs = {
'directory': self.directory,
'extensions': self.extensions,
'format': self.format,
'storage': self.storage,
}
defaults = {
'form_class': FileBrowseFormField,
'widget': FileBrowseWidget(attrs=attrs),
'directory': self.directory,
'extensions': self.extensions,
'format': self.format
}
defaults.update(kwargs)
return super(FileBrowseField, self).formfield(**defaults)
def contribute_to_class(self, cls, name, **kwargs):
"""
From django.db.models.fields.files.FileField.contribute_to_class
"""
super(FileBrowseField, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
| {
"content_hash": "2e09290625d9aed1c7e7f1229ca38fb3",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 138,
"avg_line_length": 37.88721804511278,
"alnum_prop": 0.6413971025997222,
"repo_name": "ryneeverett/filebrowser-safe",
"id": "62db66322e21052161f6f0541297c187a3d89ae4",
"size": "5039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filebrowser_safe/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4118"
},
{
"name": "HTML",
"bytes": "31600"
},
{
"name": "JavaScript",
"bytes": "23026"
},
{
"name": "Python",
"bytes": "56260"
},
{
"name": "Shell",
"bytes": "385"
}
],
"symlink_target": ""
} |
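A minimal sketch of attaching FileBrowseField to a model; the model and values are illustrative, and note that FileBrowseFormField.clean() compares against os.path.splitext() output, so entries in extensions must keep their leading dot:
from django.db import models
from filebrowser_safe.fields import FileBrowseField
class Gallery(models.Model):
    # Browsing starts under <get_directory()>/galleries/, and only the
    # listed extensions survive form validation.
    cover = FileBrowseField("Cover image", max_length=200,
                            directory="galleries/",
                            extensions=[".jpg", ".png"],
                            blank=True)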
r"""Classes to deal with the Takin resolution calculation engine
"""
import os
import pipes
import sys
from .tas_instrument import TripleAxisInstrument
from .tof_instrument import TimeOfFlightInstrument
class TakinTripleAxis(TripleAxisInstrument):
r"""Interface between TripleAxisInstrument and Takin engine
"""
def __init__(self, *args, **kwargs):
super(TakinTripleAxis, self).__init__(*args, **kwargs)
def __repr__(self):
return "Instrument('tas', engine='takin')"
def calc_resolution(self, hkle):
pass
def calc_projections(self, hkle):
pass
def resolution_convolution(self):
pass
class TakinTimeOfFlight(TimeOfFlightInstrument):
r"""Interface between TimeOfFlightInstrument and Takin engine
"""
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
return "Instrument('tof', engine='takin')"
def calc_resolution(self, hkle):
pass
def calc_projections(self, hkle):
pass
def resolution_convolution(self):
pass
| {
"content_hash": "dd83b9ce5bc049b19f733e6df4256047",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 65,
"avg_line_length": 21.857142857142858,
"alnum_prop": 0.6582633053221288,
"repo_name": "granrothge/neutronpy",
"id": "3c4a6ca9307e6a391d4068a01bf80d70b8529cd1",
"size": "1071",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutronpy/instrument/takin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Makefile",
"bytes": "1333"
},
{
"name": "Python",
"bytes": "461368"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
} |
__author__ = 'jitrixis'
from sniffery import Sniffery
class Harvest:
def __init__(self):
self.__sniffery = Sniffery()
def check(self, pks, pkt):
pks = self.__sniffery.sniff(str(pks))
pkt = self.__sniffery.sniff(str(pkt))
if pks is None or pkt is None:
return False
return self.__findWay(pks, pkt)
def __findWay(self, pks, pkt):
if pks["type"] == "arp":
return self.__wayArp(pks, pkt)
elif pks["type"] == "icmp":
return self.__wayIcmp(pks, pkt)
elif pks["type"] == "tcp":
return self.__wayTcp(pks, pkt)
else:
return False
def __wayArp(self, pks, pkt):
s = pks["packet"]
d = pkt["packet"]
if s["ethernet"].getSrc() != d["ethernet"].getDst():
return False
if s["ethernet"].getType() != d["ethernet"].getType():
return False
if s["arp"].getHwsrc() != d["arp"].getHwdst():
return False
if s["arp"].getHwlen() != d["arp"].getHwlen():
return False
if s["arp"].getHwtype() != d["arp"].getHwtype():
return False
if s["arp"].getPdst() != d["arp"].getPsrc():
return False
if s["arp"].getPsrc() != d["arp"].getPdst():
return False
if s["arp"].getPlen() != d["arp"].getPlen():
return False
if s["arp"].getPtype() != d["arp"].getPtype():
return False
if d["arp"].getOp() != 0x2:
return False
return True
def __wayIcmp(self, pks, pkt):
s = pks["packet"]
d = pkt["packet"]
if s["ethernet"].getSrc() != d["ethernet"].getDst():
return False
if s["ethernet"].getDst() != d["ethernet"].getSrc():
return False
if s["ethernet"].getType() != d["ethernet"].getType():
return False
if s["ip"].getDst() != d["ip"].getSrc():
return False
if s["ip"].getSrc() != d["ip"].getDst():
return False
if s["ip"].getProto() != d["ip"].getProto():
return False
if s["ip"].getVersion() != d["ip"].getVersion():
return False
if s["icmp"].getId() != d["icmp"].getId():
return False
if s["icmp"].getSeq() != d["icmp"].getSeq():
return False
if d["icmp"].getCode() != 0:
return False
if d["icmp"].getType() != 0:
return False
return True
def __wayTcp(self, pks, pkt):
        return True
| {
"content_hash": "445938fac8d46a0961b3d1c5c4ea39f4",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 62,
"avg_line_length": 29.04494382022472,
"alnum_prop": 0.47775628626692457,
"repo_name": "Jitrixis/2ARC-Network-stack",
"id": "bdc3a5bf5c6bed0f7d12e69722540b4be9464408",
"size": "2585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TVpy/Factory/harvestery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42544"
}
],
"symlink_target": ""
} |
from PIL import Image, ImageDraw, ImageFont
import random
import string
class SimpleCaptchaException(Exception):
pass
class SimpleCaptcha(object):
def __init__(self, length=5, size=(200, 100), fontsize=36,
random_text=None, random_bgcolor=None):
self.size = size
self.text = "CAPTCHA"
self.fontsize = fontsize
self.bgcolor = 255
self.length = length
self.image = None # current captcha image
if random_text:
self.text = self._random_text()
if not self.text:
raise SimpleCaptchaException("Field text must not be empty.")
if not self.size:
raise SimpleCaptchaException("Size must not be empty.")
if not self.fontsize:
raise SimpleCaptchaException("Font size must be defined.")
if random_bgcolor:
self.bgcolor = self._random_color()
def _center_coords(self, draw, font):
width, height = draw.textsize(self.text, font)
xy = (self.size[0] - width) / 2., (self.size[1] - height) / 2.
return xy
def _add_noise_dots(self, draw):
size = self.image.size
for _ in range(int(size[0] * size[1] * 0.1)):
draw.point((random.randint(0, size[0]),
random.randint(0, size[1])),
fill="white")
return draw
def _add_noise_lines(self, draw):
size = self.image.size
for _ in range(8):
width = random.randint(1, 2)
start = (0, random.randint(0, size[1] - 1))
end = (size[0], random.randint(0, size[1] - 1))
draw.line([start, end], fill="white", width=width)
for _ in range(8):
start = (-50, -50)
end = (size[0] + 10, random.randint(0, size[1] + 10))
draw.arc(start + end, 0, 360, fill="white")
return draw
def get_captcha(self, size=None, text=None, bgcolor=None):
if text is not None:
self.text = text
if size is not None:
self.size = size
if bgcolor is not None:
self.bgcolor = bgcolor
self.image = Image.new('RGB', self.size, self.bgcolor)
font = ImageFont.truetype('fonts/Vera.ttf', self.fontsize)
draw = ImageDraw.Draw(self.image)
xy = self._center_coords(draw, font)
draw.text(xy=xy, text=self.text, font=font)
# Add some noise
draw = self._add_noise_dots(draw)
# Add some random lines
draw = self._add_noise_lines(draw)
self.image.show()
return self.image, self.text
def _random_text(self):
letters = string.ascii_lowercase + string.ascii_uppercase
random_text = ""
for _ in range(self.length):
random_text += random.choice(letters)
return random_text
def _random_color(self):
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
return (r, g, b)
if __name__ == "__main__":
sc = SimpleCaptcha(length=7, fontsize=36, random_text=True, random_bgcolor=True)
sc.get_captcha()
| {
"content_hash": "74477ad6fe522d47ca46f0752bd68300",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 84,
"avg_line_length": 31.57,
"alnum_prop": 0.5603420969274627,
"repo_name": "pletisan/python-data-viz-cookbook",
"id": "ef14a186aa5665bb72ee47093cd64c8a97be91fc",
"size": "3157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3367OS_Code/3367OS_06_Code/ch06/ch06_rec06_01_captcha.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Shard life cycle abstract class."""
class _ShardLifeCycle(object):
"""Abstract class for objects that live along shard's life cycle.
Objects that need to plug in business logic into a shard's life cycle
should implement this interface.
The life cycle is:
* begin_shard is called at the beginning of every shard attempt.
* begin_slice is called at the beginning of every slice attempt.
* end_slice is called at the end of a slice. Slice may still fail
after the call.
* end_shard is called at the end of a shard. Shard may still fail
after the call.
All these methods are invoked as part of shard execution. So be careful
not to perform long standing IO operations that may kill this request.
"""
def begin_shard(self, shard_ctx):
"""Called at the beginning of a shard.
This method may be called more than once due to shard and slice retry.
Make it idempotent.
Args:
shard_ctx: map_job_context.ShardContext object.
"""
pass
def end_shard(self, shard_ctx):
"""Called at the end of a shard.
This method may be called more than once due to shard and slice retry.
Make it idempotent.
If shard execution error out before reaching the end, this method
won't be called.
Args:
shard_ctx: map_job_context.ShardContext object.
"""
pass
def begin_slice(self, slice_ctx):
"""Called at the beginning of a slice.
This method may be called more than once due to slice retry.
Make it idempotent.
Args:
slice_ctx: map_job_context.SliceContext object.
"""
pass
def end_slice(self, slice_ctx):
"""Called at the end of a slice.
This method may be called more than once due to slice retry.
Make it idempotent.
If slice execution error out before reaching the end, this method
won't be called.
Args:
slice_ctx: map_job_context.SliceContext object.
"""
pass
| {
"content_hash": "20070ac6ae9c76ad979d311d6f23e27f",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 74,
"avg_line_length": 26.47945205479452,
"alnum_prop": 0.6875323331608898,
"repo_name": "gauribhoite/personfinder",
"id": "18a2144a410149447b873d68bcf3db81f88a696f",
"size": "2534",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "env/google_appengine/google/appengine/ext/mapreduce/shard_life_cycle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "423"
},
{
"name": "Batchfile",
"bytes": "5005"
},
{
"name": "C",
"bytes": "413819"
},
{
"name": "CSS",
"bytes": "330448"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "HTML",
"bytes": "720955"
},
{
"name": "JavaScript",
"bytes": "1072023"
},
{
"name": "Makefile",
"bytes": "16086"
},
{
"name": "PHP",
"bytes": "2582470"
},
{
"name": "Python",
"bytes": "60243792"
},
{
"name": "Shell",
"bytes": "7491"
},
{
"name": "TeX",
"bytes": "60219"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
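An illustrative subclass of _ShardLifeCycle showing what idempotence means in practice for these hooks; it deliberately uses nothing from the context objects beyond their existence, since the wider map_job API lives outside this file:
import logging
class LoggingLifeCycle(_ShardLifeCycle):
  """Example only, not part of the library."""
  def begin_shard(self, shard_ctx):
    # May run more than once on shard retry; plain assignment is
    # naturally idempotent, so repeated calls are harmless.
    self._rows_seen = 0
  def begin_slice(self, slice_ctx):
    logging.debug("slice starting; rows seen so far: %d", self._rows_seen)
  def end_slice(self, slice_ctx):
    # Keep this cheap: it executes inside the request serving the slice.
    logging.debug("slice finished")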
__all__ = ['create_test_spatial_db', 'get_geo_where_clause', 'SpatialBackend']
from django.contrib.gis.db.backend.base import BaseSpatialBackend
from django.contrib.gis.db.backend.oracle.adaptor import OracleSpatialAdaptor
from django.contrib.gis.db.backend.oracle.creation import create_test_spatial_db
from django.contrib.gis.db.backend.oracle.field import OracleSpatialField
from django.contrib.gis.db.backend.oracle.models import GeometryColumns, SpatialRefSys
from django.contrib.gis.db.backend.oracle.query import *
SpatialBackend = BaseSpatialBackend(name='oracle', oracle=True,
area=AREA,
centroid=CENTROID,
difference=DIFFERENCE,
distance=DISTANCE,
distance_functions=DISTANCE_FUNCTIONS,
extent=EXTENT,
gis_terms=ORACLE_SPATIAL_TERMS,
gml=ASGML,
intersection=INTERSECTION,
length=LENGTH,
limited_where = {'relate' : None},
num_geom=NUM_GEOM,
num_points=NUM_POINTS,
perimeter=LENGTH,
point_on_surface=POINT_ON_SURFACE,
select=GEOM_SELECT,
sym_difference=SYM_DIFFERENCE,
transform=TRANSFORM,
unionagg=UNIONAGG,
union=UNION,
Adaptor=OracleSpatialAdaptor,
Field=OracleSpatialField,
GeometryColumns=GeometryColumns,
SpatialRefSys=SpatialRefSys,
)
| {
"content_hash": "900464e0c8fbc84d5896b28f99dc45c4",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 86,
"avg_line_length": 59.65714285714286,
"alnum_prop": 0.4540229885057471,
"repo_name": "greggian/TapdIn",
"id": "4c54b8f60565d4454f056d14b7a12ab328ffc684",
"size": "2088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/db/backend/oracle/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "82525"
},
{
"name": "Python",
"bytes": "3585862"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
} |
todayFile = file ( 'today.htm', 'w' )
print >> todayFile, '<!-- DO NOT edit this file, edit the following file \nJ:\\outlines\\today.py\n -->'
tables = {
'1 Today': """\
basement mold
squirrels
check libraries
security lights
rethink Internet configuration
PBS Price Pottenger Rae
auto search Hamilton career web sites
Salvation Army form
report to Giammarco
call about DVD warranty
free career counselling to selected candidates
resume to Carole Martin (see ad from HRDC software)
tweezers
""",
'5 Home Depot': """\
clamp for motor
trenching tool
""",
'2 Giant Tiger':"""\
chewies
1/2 & 1/2
dye
""",
'4 Hagersville': """\
""",
'3 Groceries': """\
""",
'6 Libraries': """\
Alexander Fullerton
Champagne Navy
mystery novels
Barbara Hardy: London Lovers
John Buxton Hilton: Displaced Person
Patricia Duncker: Monsieur Shoushana's Lemon Trees
Klemperer (Cohen)
""",
'7 Hamilton': """\
water filter
""",
}
from string import split
for table in tables :
print >> todayFile, '<table width="100%" cellspacing="5">'
print >> todayFile, '<tr><th colspan="3" align="center">%s</th></tr>' % table
items = [ item for item in split ( tables [ table ], '\n' ) if item ] + 3 * [ '-' ]
for start in range ( 0, len ( items ), 3 ) :
if items [ start ] != '-' :
print >> todayFile, '<tr>',
for item in items [ start : start + 3 ] :
print >> todayFile, '<td>%s</td>' % item
print >> todayFile, '</tr>'
print >> todayFile, '</table>'
todayFile . close ( )
from win32com.shell.shellcon import *
from win32com.shell import shell
# SHChangeNotify should, I understand, be content with None in each of the final two arguments
# However, since it's not ...
lpil = shell . SHGetSpecialFolderLocation ( 0, CSIDL_DESKTOP )
shell . SHChangeNotify ( SHCNE_ASSOCCHANGED, SHCNF_IDLIST, lpil, lpil )
| {
"content_hash": "8744d4602e18f2f73d840e47fd8087bc",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 104,
"avg_line_length": 23.91025641025641,
"alnum_prop": 0.6536193029490617,
"repo_name": "ActiveState/code",
"id": "5a99696c1cd448145355a5a5780040830f2de992",
"size": "1865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/412808_Refreshing_Windows_Desktop__New_Help/recipe-412808.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_rock_shelter_large.iff"
result.attribute_template_id = -1
result.stfName("lair_n","rock_shelter")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "01bb1c1d493c52c4332bf81028b55f22",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 24.53846153846154,
"alnum_prop": 0.6959247648902821,
"repo_name": "anhstudios/swganh",
"id": "785dbb49ed52fd6040a59ab0ee886b7b68c45854",
"size": "464",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_rock_shelter_large.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import time
import requests
import os
import errno
import ast
import sys
import argparse
import configparser
import threading
import logging
from pathlib import Path
from datetime import timedelta, datetime
__version__ = '0.17'
prog_description = ("Generates kodi-compatible strm files from torrents on "
"premiumize. Parameters are required for first run to "
"generate config file")
# on disk torrent hash db
hash_db = os.path.join(os.path.dirname(os.path.abspath(__file__)), "hash.db")
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"config_p2l.ini")
# Define video and subs extensions
VIDEO_EXTS = ['M4V', '3G2', '3GP', 'NSV', 'TP', 'TS', 'TY', 'PLS', 'RM',
'RMVB', 'MPD', 'M3U', 'M3U8', 'IFO', 'MOV', 'QT', 'DIVX', 'XVID',
'BIVX', 'VOB', 'NRG', 'PVA', 'WMV', 'ASF', 'ASX', 'OGM', 'M2V',
'AVI', 'DAT', 'MPG', 'MPEG', 'MP4', 'MKV', 'MK3D', 'AVC', 'VP3',
'SVQ3', 'NUV', 'VIV', 'DV', 'FLI', 'FLV', '001', 'WPL', 'VDR',
'DVR-MS', 'XSP', 'MTS', 'M2T', 'M2TS', 'EVO', 'OGV', 'SDP',
'AVS', 'REC', 'URL', 'PXML', 'VC1', 'H264', 'RCV', 'RSS',
'MPLS', 'WEBM', 'BDMV', 'WTV']
SUBS_EXTS = ['SRT']
def load_hashdb():
# Load hash db from disk
logger = logging.getLogger("load_hashdb")
if os.path.exists(hash_db):
try:
with open(hash_db, 'r') as file:
raw = file.read()
ondisk_hashes = ast.literal_eval(raw)
except Exception as e:
logger.warning("{0}".format(e))
logger.warning("Assuming empty hash db")
ondisk_hashes = []
else:
ondisk_hashes = []
return ondisk_hashes
# Get torrent list from root_list
def get_torrents(content, all_at_once, base_dir):
days_before_refresh = 7
logger = logging.getLogger("get_torrents")
torrents = []
imported_torrents = []
ondisk_hashes = load_hashdb()
for item in content:
if item['type'] == 'folder':
curTorrent = {'name': item['name'], 'hash': item['id'],
'date': datetime.today().strftime("%d%m%y"),
'skip': False}
torrents.append(curTorrent)
print("---------- Found torrent ----------")
print("Torrent: " + item['name'])
print("Hash: " + item['id'])
skip = False
if not ondisk_hashes == []:
# check for unique hash before import
for od_hash in ondisk_hashes:
if (od_hash['hash'] == curTorrent['hash'] and
(datetime.today() -
datetime.strptime(od_hash['date'], "%d%m%y")) <
timedelta(days_before_refresh) and
(od_hash['skip'] or
os.path.exists(os.path.join(base_dir,
od_hash['name'])))):
print("Skipping, already on disk or" +
" marked as skip")
skip = True
break
while not skip:
if all_at_once:
import_torrent = 'Y'
else:
import_torrent = input("Import torrent? (y/n)")
if import_torrent.upper() == 'Y':
logger.debug("Importing " + item['name'] +
" hash: " + item['id'])
imported_torrents.append(curTorrent)
browse_torrent(item['id'], all_at_once, os.path.join(base_dir, item['name']))
break
elif import_torrent.upper() == 'N':
curTorrent['skip'] = True
imported_torrents.append(curTorrent)
logger.debug("Skipping " + item['name'] +
" hash: " + item['id'])
break
cleanup(torrents, imported_torrents)
# Browse content of torrent for videos
def browse_torrent(hash_id, all_at_once, base_dir):
number_of_retries = 5
logger = logging.getLogger("browse_torrent")
for i in range(1, number_of_retries + 1):
try:
results = (requests.
post('https://www.premiumize.me/api/folder/list',
data={'customer_id': customer_id, 'pin': pin,
'id': hash_id})).json()
logger.debug(results)
except (requests.ConnectionError,
requests.HTTPError, requests.Timeout) as e:
logger.warning("Error getting torrent " + hash_id)
logger.warning("{0}".format(e))
logger.warning("Retry: " + str(i) + "/" + str(number_of_retries))
time.sleep(10)
except requests.RequestException as e:
logger.critical("{0}".format(e))
logger.critical("Unable to handle exception, quitting")
sys.exit(1)
else:
break
else:
logger.critical("Unable to handle exception, quitting")
sys.exit(1)
if 'content' in results:
for item in results['content']:
if item['type'] == 'folder':
logger.debug("Found folder " + item['name'])
browse_torrent(item['id'], all_at_once, os.path.join(base_dir, item['name']))
elif item['type'] == 'file':
logger.debug("Found file " + os.path.join(base_dir,item['name']))
if os.path.splitext(item['name'])[1].upper()[1:] in VIDEO_EXTS:
logger.info("Found video: " + item['name'])
path = os.path.join(base_dir,
os.path.splitext(item['name'])[0]+'.strm')
video = {'path': path, 'name': item['name'],
'url': item['stream_link']}
                    create_strm(video, all_at_once)
elif os.path.splitext(item['name'])[1].upper()[1:] in SUBS_EXTS:
logger.info("Found subtitle: " + item['name'])
path = os.path.join(base_dir, item['name'])
sub = {'path': path, 'name': item['name'], 'url': item['link']}
t = threading.Thread(target=download_sub,
args=(sub, all_at_once),
name="Download: " + item['name'])
t.start()
else:
logger.debug("File is unknown")
# Generate strm file
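# A .strm file is a one-line stub (the format used by Kodi-style media
# centers) containing just the stream URL; the player resolves it at
# playback time.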
def create_strm(video, all_at_once):
logger = logging.getLogger("create_strm")
# create directory if not exists
if not os.path.exists(os.path.dirname(video['path'])):
try:
os.makedirs(os.path.dirname(video['path']))
logger.debug("Created path: " + os.path.dirname(video['path']))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
# create strm file if not exists
logger.debug("Creating file: " + video['path'])
while True:
try:
with open(video['path'], "w") as f:
f.write(video['url'])
        except Exception as e:
            logger.warning("{0}".format(e))
            if all_at_once:
                break  # unattended run: skip this file instead of prompting
            e_handling = input("(R)etry, (S)kip, (A)bort?")
            if e_handling.upper() == 'S':
                break
            elif e_handling.upper() == 'A':
                sys.exit(1)
            # any other answer falls through and retries
else:
break
# Download subtitle file
def download_sub(sub, all_at_once):
number_of_retries = 5
logger = logging.getLogger("download_sub")
# create directory if not exists
if not os.path.exists(os.path.dirname(sub['path'])):
try:
os.makedirs(os.path.dirname(sub['path']))
logger.debug("Created path: " + os.path.dirname(sub['path']))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
# create sub file if not exists
if not os.path.exists(sub['path']):
logger.debug("Creating file: " + sub['path'])
while True:
try:
with open(sub['path'], "wb") as file:
for i in range(1, number_of_retries + 1):
try:
sub_file = requests.get(sub['url'])
except (requests.ConnectionError,
requests.HTTPError, requests.Timeout) as e:
logger.warning("Error getting subtitle " +
sub['url'])
logger.warning("{0}".format(e))
logger.warning("Retry: " + str(i) + "/" +
str(number_of_retries))
time.sleep(10)
except requests.RequestException as e:
logger.critical("{0}".format(e))
                            logger.critical("Unable to handle exception, quitting")
sys.exit(1)
else:
break
else:
                        logger.critical("Unable to handle exception, quitting")
sys.exit(1)
file.write(sub_file.content)
            except Exception as e:
                logger.warning("{0}".format(e))
                if all_at_once:
                    break  # unattended run: skip this subtitle instead of prompting
                e_handling = input("(R)etry, (S)kip, (A)bort?")
                if e_handling.upper() == 'S':
                    break
                elif e_handling.upper() == 'A':
                    sys.exit(1)
                # any other answer falls through and retries
else:
break
else:
logger.debug("Skipping file " + sub['path'] + " already exists")
# Check if files on disk are still available on premiumize
# Delete if remotely deleted
def cleanup(torrents, imported_torrents):
logger = logging.getLogger("cleanup")
logger.info("Cleanup...")
ondisk_hashes = load_hashdb()
# Load hash db from disk
    if ondisk_hashes:
# check for unique hash before import
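        # Merge step: the inner for/else appends an imported torrent only when
        # the loop finished without finding its hash, so entries stay unique.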
for im_torrent in imported_torrents:
for od_hash in ondisk_hashes:
if od_hash['hash'] == im_torrent['hash']:
break
else:
ondisk_hashes.append(im_torrent)
else:
ondisk_hashes = imported_torrents
# compare ondisk_hashes with torrents hashes
cleaned_hashes = []
for od_hash in ondisk_hashes:
for torrent in torrents:
if (od_hash['hash'] == torrent['hash']):
logger.info("Keeping " + od_hash['name'])
cleaned_hashes.append(od_hash)
break
else:
if os.path.exists(os.path.join(base_dir, od_hash['name'])):
logger.warning("Deleting " + od_hash['name'] + " from disk" +
" because it was deleted on premiumize")
try:
torrent_path = Path(os.path.join(base_dir,
od_hash['name']))
for f in torrent_path.glob("**/*.strm"):
print("Deleting " + str(f))
os.remove(str(f))
except Exception as e:
logger.warning("{0}".format(e))
logger.warning("Unable to properly delete torrent")
logger.warning("Keeping in db for next cleanup")
cleaned_hashes.append(od_hash)
else:
logger.debug("Deleted " +
os.path.join(base_dir, od_hash['name']))
else:
logger.warning(od_hash['name'] + " has been removed " +
"from premiumize")
ondisk_hashes = cleaned_hashes
# create directory if not exists
try:
if not os.path.exists(os.path.dirname(hash_db)):
try:
os.makedirs(os.path.dirname(hash_db))
logger.debug("Created path: " + os.path.dirname(hash_db))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
# save hash db to disk
with open(hash_db, "w") as file:
file.write(str(ondisk_hashes))
except Exception as e:
logger.warning("{0}".format(e))
logger.warning("Unable to save hash db to disc")
def main():
    global customer_id, pin, base_dir  # cleanup() reads base_dir at module scope
parser = argparse.ArgumentParser(description=prog_description)
config = configparser.ConfigParser()
config['MAIN'] = {}
# if config not exists, make args required
# args_req = True
if not os.path.exists(config_file):
args_req = True
# Config file exists, load its values first, override with args
else:
try:
config.read(config_file)
customer_id = config.get('MAIN', 'customer_id')
pin = config.get('MAIN', 'pin')
base_dir = config.get('MAIN', 'base_dir')
except (configparser.Error) as e:
print("{0}".format(e))
print("Error reading config file, ignoring")
customer_id = ""
pin = ""
base_dir = ""
args_req = True
else:
args_req = False
parser.add_argument('-u', '--user', metavar="ID", required=args_req,
help="Premiumize customer id")
parser.add_argument('-p', '--pin', required=args_req,
help="Premiumize PIN")
parser.add_argument('-o', '--outdir', required=args_req, metavar="PATH",
help="Output directory for generated files")
parser.add_argument('-a', '--all', action='store_true',
help="Import all videos from premiumize at once")
    debug_group = parser.add_argument_group("Output", "Output related options")
debug_options = debug_group.add_mutually_exclusive_group()
debug_options.add_argument('-d', '--debug', action="store_true",
help="Show debug output")
debug_options.add_argument('-v', '--verbose', action='store_true',
help="Show verbose output")
debug_options.add_argument('-q', '--quiet', action='store_true',
help="Disable output")
parser.add_argument('--version', action='version',
version='%(prog)s {version}'
.format(version=__version__))
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
elif args.verbose:
logging.basicConfig(level=logging.INFO)
elif args.quiet:
sys.stdout = open(os.devnull, "w")
else:
logging.basicConfig(level=logging.WARNING)
if args.user is not None:
customer_id = args.user
config['MAIN']['customer_id'] = args.user
if args.pin is not None:
pin = args.pin
config['MAIN']['pin'] = args.pin
if args.outdir is not None:
base_dir = os.path.join(args.outdir, '')
config['MAIN']['base_dir'] = os.path.join(args.outdir, '')
logger = logging.getLogger("main")
logger.debug("Arguments from command line: " + str(sys.argv[1:]))
with open(config_file, 'w') as configfile:
config.write(configfile)
logger.debug("Saved config to file: " + config_file)
# Start actual creation process
number_of_retries = 5
for i in range(1, number_of_retries + 1): # Try 5 times
try:
root_list = (requests.post(
'https://www.premiumize.me/api/folder/list',
data={'customer_id': customer_id, 'pin': pin})).json()
logger.debug(root_list)
except (requests.ConnectionError,
requests.HTTPError, requests.Timeout) as e:
logger.warning("Error getting root folder from premiumize")
logger.warning("{0}".format(e))
logger.warning("Retry: " + str(i) + "/" + str(number_of_retries))
time.sleep(10)
except requests.RequestException as e:
logger.critical("{0}".format(e))
logger.critical("Unable to handle exception, quitting")
sys.exit(1)
else:
break
else:
logger.critical("Unable to handle exception, quitting")
sys.exit(1)
try:
get_torrents(root_list['content'], args.all, base_dir)
except KeyboardInterrupt:
print("Exiting...")
sys.exit(1)
if threading.active_count() > 1:
print("Waiting for download processes to finish")
logger.debug("Active threads: " + str(threading.active_count()))
        while threading.active_count() > 1:
            time.sleep(0.5)  # poll instead of busy-waiting on the CPU
print("Exiting...")
sys.exit(0)
if __name__ == "__main__":
main()
| {
"content_hash": "e7d68e98848b6b6026b20ccee7d01bf8",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 101,
"avg_line_length": 40.768149882903984,
"alnum_prop": 0.4954044117647059,
"repo_name": "dommtardif/premium2lib",
"id": "74e1444c0c099b76399948e58878f3619c3d906a",
"size": "17748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "premium2lib.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17748"
}
],
"symlink_target": ""
} |
from cement.core.foundation import CementApp
from cement.utils.misc import init_defaults
from cement.core.controller import CementBaseController, expose
from core.Util import Util
from core.Parser import Parser
import string
import slugify
import feedparser
import re
import os
defaults = init_defaults('slowgerman')
defaults['slowgerman']['slow_german_rss_url'] = 'http://slowgerman.com/feed/podcast/'
defaults['slowgerman']['show_last_entries'] = 10
defaults['slowgerman']['output_root_path'] = './word_lists/slowgerman'
class SlowGermanBaseController(CementBaseController):
class Meta:
arguments = [
(['-la', '--list-all'],
dict(action='store_true', help='List all slow german entries')),
]
label = 'base'
config_defaults = defaults
@expose(hide=True, aliases=['run'])
def default(self):
limit = self.Meta.config_defaults['slowgerman']['show_last_entries']
url = self.Meta.config_defaults['slowgerman']['slow_german_rss_url']
root_path = self.Meta.config_defaults['slowgerman']['output_root_path']
self.app.log.info('Parsing the last {0} slow german feeds'.format(limit))
feed = feedparser.parse(url)
for i in range(limit):
title = feed['entries'][i]['title']
title = slugify.slugify(title, only_ascii=True)
content = feed['entries'][i]['content'][0]['value']
content = Util().remove_html(content)
self.app.log.info('Beginning to create word list')
            unique_words = set()
word_dict, stats = Parser().parse(content)
for word_set in word_dict.values():
for word in word_set:
if len(word) > 3:
word = filter(unicode.isalnum, word)
unique_words.add(word.lower())
self.app.log.info('Finished word list')
#self.app.log.info('Article {0} contains {1} unique words'.format(title, len(unique_words)))
stats.print_stats()
            path_to_word_list = os.path.join(root_path, title)
            if not os.path.exists(path_to_word_list):
                os.makedirs(path_to_word_list)
            with open(os.path.join(path_to_word_list, 'wordlist.txt'), 'w') as word_list_file:
                for word in unique_words:
                    word_list_file.write("{0}\n".format(word.encode('utf-8')))
self.app.log.info('finished parsing')
class SlowGermanApp(CementApp):
class Meta:
label = 'slowgerman'
base_controller = SlowGermanBaseController
with SlowGermanApp() as app:
app.run()
| {
"content_hash": "2556629492199b71b89d4f11515390ce",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 104,
"avg_line_length": 36.4025974025974,
"alnum_prop": 0.601855155190867,
"repo_name": "jayrod/wordify",
"id": "1ece06666d707ba67b20ef029673d29bccf8be02",
"size": "2803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slowgerman.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24576"
}
],
"symlink_target": ""
} |
"""Model for aircraft flights"""
class Flight:
"""A flight with a particular passenger aircraft"""
def __init__(self, number, aircraft):
if not number[:2].isalpha():
raise ValueError("No airline code in '{}'".format(number))
if not number[:2].isupper():
raise ValueError("Invalid airline code '{}'".format(number))
if not (number[2:].isdigit() and int(number[2:]) <= 9999):
raise ValueError("Invalid route number '{}'".format(number))
self._number = number
self._aircraft = aircraft
rows, seats = self._aircraft.seating_plan()
self._seating = [None] + [{letter: None for letter in seats} for _ in rows]
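        # Index 0 is a None placeholder so 1-based row numbers index the list
        # directly; each row dict maps seat letters to the occupying passenger.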
def number(self):
return self._number
def airline(self):
return self._number[:2]
def aircraft_model(self):
return self._aircraft.model()
def _parse_seat(self, seat):
"""Parse a seat designator into valid row and letter.
Args:
seat: A seat designator such as 12F
Returns:
A tuple containing an integer and a string for row and seat.
"""
rows, seat_letters = self._aircraft.seating_plan()
letter = seat[-1]
if letter not in seat_letters:
            raise ValueError("Invalid seat letter {}".format(letter))
row_text = seat[:-1]
try:
row = int(row_text)
except ValueError:
            raise ValueError("Invalid seat row {}".format(row_text))
if row not in rows:
raise ValueError("Invalid row number {}".format(row))
return row, letter
def allocate_seat(self, seat, passenger):
"""Allocate a seat to a passenger.
Args:
seat: A seat designator such as '12C' or '21F'.
passenger: The passenger name.
Raises:
ValueError: If the seat is unavailable.
"""
row, letter = self._parse_seat(seat)
if self._seating[row][letter] is not None:
raise ValueError("Seat {} already occupied".format(seat))
self._seating[row][letter] = passenger
def relocate_passenger(self, from_seat, to_seat):
"""Relocate a passenger to a different seat.
Args:
from_seat: The existing seat designator for the
passenger to be moved.
to_seat: The new seat designator.
"""
from_row, from_letter = self._parse_seat(from_seat)
if self._seating[from_row][from_letter] is None:
raise ValueError("No passenger to relocate in seat {}".format(from_seat))
to_row, to_letter = self._parse_seat(to_seat)
if self._seating[to_row][to_letter] is not None:
raise ValueError("Seat {} already occupied.".format(to_seat))
self._seating[to_row][to_letter] = self._seating[from_row][from_letter]
self._seating[from_row][from_letter] = None
def num_available_seats(self):
return sum(sum(1 for s in row.values() if s is None)
for row in self._seating
if row is not None)
def make_boarding_cards(self, card_printer):
for passenger, seat in sorted(self._passenger_seats()):
card_printer(passenger, seat, self.number(), self.aircraft_model())
def _passenger_seats(self):
"""An iterable series of passenger seating allocations."""
row_numbers, seat_letters = self._aircraft.seating_plan()
for row in row_numbers:
for letter in seat_letters:
passenger = self._seating[row][letter]
if passenger is not None:
yield (passenger, "{}{}".format(row, letter))
class Aircraft:
def __init__(self, registration):
self._registration = registration
def registration(self):
return self._registration
def num_seats(self):
rows, row_seats = self.seating_plan()
return len(rows) * len(row_seats)
class AirbusA319(Aircraft):
def model(self):
        return "Airbus A319"
def seating_plan(self):
return range(1, 23), "ABCDEF"
class Boeing777(Aircraft):
def model(self):
return "Boeing 777"
def seating_plan(self):
return range(1, 56), "ABCDEGHJK"
def make_flights():
f = Flight("BA1234", AirbusA319("B-DGED"))
f.allocate_seat("12A", "Jenny Celly")
f.allocate_seat("15F", "Martin Castillo")
f.allocate_seat("1E", "Mateo Castillo")
f.allocate_seat("20D", "Leilah Castillo")
f.allocate_seat("21C", "Nelly Vernaza")
g = Flight("AC1287", Boeing777("K-IEHD"))
g.allocate_seat("55K", "Elton John")
g.allocate_seat("33G", "Yoko Ono")
g.allocate_seat("4A", "Prince")
g.allocate_seat("4B", "Dave Matthews")
return f, g
def console_card_printer(passenger, seat, flight_number, aircraft):
    output = "| Name: {0}" \
             " Flight: {1}" \
             " Seat: {2}" \
             " Aircraft: {3}" \
             " |".format(passenger, flight_number, seat, aircraft)
banner = '+' + '-' * (len(output) - 2) + '+'
border = '|' + ' ' * (len(output) - 2) + '|'
lines = [banner, border, output, border, banner]
card = '\n'.join(lines)
print(card)
    print()
| {
"content_hash": "043d60495da044177d2a4c80d976e0d2",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 85,
"avg_line_length": 30.91812865497076,
"alnum_prop": 0.5770758464157367,
"repo_name": "marencas/LearningPython",
"id": "7048ee1ad1d189fb8166aac89f67ec012f579496",
"size": "5287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonFundamentals/classes/airtravel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37807"
}
],
"symlink_target": ""
} |
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='elyse',
version='0.1.1',
description='A simple static website generator.',
author='Frank Smit',
author_email='frank@61924.nl',
license='MIT',
long_description=open('README.rst').read(),
scripts=['elyse'],
install_requires=[
'pyyaml',
'tornado',
'houdini.py',
'misaka',
'pygments',
'unidecode'
],
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Topic :: Text Processing',
'Topic :: Utilities'
]
)
| {
"content_hash": "97abb8d8ae6055001fed35f559a8cb11",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 53,
"avg_line_length": 23.742857142857144,
"alnum_prop": 0.5667870036101083,
"repo_name": "FSX/elyse",
"id": "76f79e6860f447d229507109c52c4393f413b94d",
"size": "831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14705"
}
],
"symlink_target": ""
} |
import os
import sys
import pdb
import unittest
sys.path.insert(0, os.path.abspath('../deep/tools'))
sys.path.insert(0, os.path.abspath('./'))
import state
class EV_one(state.event): pass
class EV_two(state.event): pass
class EV_three(state.event): pass
class EV_four(state.event): pass
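# Note: the transitions dicts below are keyed by event *instances*, which
# assumes the state module's event class hashes/compares by type (so a fresh
# EV_one() matches the EV_one() used as a key). That is an assumption about
# state.py, not something verified in this test file.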
class ST_A(state.state):
def __init__(self):
state.state.__init__(self)
def transitions(self):
self.trans = {
EV_one() : ST_B(),
EV_two() : ST_C(),
EV_three() : ST_D(),
}
class ST_B(state.state):
def __init__(self):
state.state.__init__(self)
def transitions(self):
self.trans = {
EV_one() : ST_B(),
EV_two() : ST_C(),
}
class ST_C(state.state):
def __init__(self):
state.state.__init__(self)
def transitions(self):
self.trans = {
EV_two() : ST_A(),
}
class ST_D(state.state):
def __init__(self):
state.state.__init__(self)
def transitions(self):
self.trans = {
EV_four() : ST_E(),
}
def run(self, event):
return EV_four()
class ST_E(state.state):
def __init__(self):
state.state.__init__(self)
def transitions(self):
self.trans = {
}
class TestFSM1(unittest.TestCase):
def setUp(self) :
self.fsm = state.machine(ST_A())
def testEvent1(self):
self.fsm.injectEvent(EV_one())
self.fsm.injectEvent(EV_one())
self.fsm.injectEvent(EV_two())
self.fsm.injectEvent(EV_two())
self.assertTrue('.ST_A' in str(self.fsm.currentState))
def testEvent2(self):
self.fsm.injectEvent(EV_two())
self.fsm.injectEvent(EV_two())
self.assertTrue('.ST_A' in str(self.fsm.currentState))
def testTrans(self):
self.fsm.injectEvent(EV_three())
self.assertTrue('.ST_E' in str(self.fsm.currentState))
def testSuite():
_tests = ['testEvent1',
'testEvent2',
'testTrans',]
_suite = unittest.TestSuite(map(TestFSM1, _tests))
return _suite
if __name__ == '__main__':
_testRunner = unittest.TextTestRunner(verbosity=2)
_suites = [testSuite(),]
_allTests = unittest.TestSuite(_suites)
_testRunner.run(_allTests)
| {
"content_hash": "8f9c52aeaa76d4d33792b67f534cd4fb",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 62,
"avg_line_length": 21.7196261682243,
"alnum_prop": 0.5516351118760757,
"repo_name": "deepgrant/deep-tools",
"id": "d844193535553bfbce7cd68870ae0dae8e6a389b",
"size": "2346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/teststate.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "80325"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def main():
#error("oba_our_errors_train.dat", "train_error.png", 0, "Train")
error("oba_our_errors_test.dat", "test_error.pdf", 0)
def error(filename, output_filename, sched_error_value):
data = np.loadtxt(filename)
k = np.arange(1,data.shape[0]+1,1)
oba_error = np.array(data[:,0]).reshape(data.shape[0],1)
mode_error = np.array(data[:,1]).reshape(data.shape[0],1)
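    # Input layout: one row per k; column 0 is the OBA RMSE, column 1 ours.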
#sched_error = np.empty(data.shape[0]).reshape(data.shape[0],1)
#sched_error.fill(sched_error_value)
fig = plt.figure()
#fig.suptitle("Error vs. K - {} set".format(type), fontsize=13, fontweight='bold')
fontsize = 14
ax = plt.gca()
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
tick.label1.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
tick.label1.set_fontweight('bold')
plt.plot(k, oba_error, "b", lw=3)
plt.plot(k, mode_error, "r", lw=3)
#plt.plot(k, sched_error, "b")
plt.xlabel("k (number of segments)", fontweight='bold')
plt.ylabel("RMSE (seconds)", fontweight='bold')
plt.legend(['OBA', 'OUR'], loc="lower right")
plt.savefig(output_filename)
plt.show()
if __name__ == '__main__':
    main()
| {
"content_hash": "54eaacde09da22d2b92768fe43b7a47b",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 86,
"avg_line_length": 33.609756097560975,
"alnum_prop": 0.6211901306240929,
"repo_name": "sameersingh/onebusaway",
"id": "9d5fa5bfe3a53ed51e402596b83bfef7013d5df1",
"size": "1378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml/oba_ml/error_plotter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1117"
},
{
"name": "Java",
"bytes": "170241"
},
{
"name": "JavaScript",
"bytes": "340900"
},
{
"name": "Matlab",
"bytes": "10570"
},
{
"name": "Python",
"bytes": "9788"
},
{
"name": "Shell",
"bytes": "995"
}
],
"symlink_target": ""
} |
from netforce.model import Model, fields, get_model
class Tax(Model):
_name = "hr.payslip.tax"
_fields = {
"payslip_id": fields.Many2One("hr.payslip", "Payslip"),
"name": fields.Char("Description", size=255),
"code": fields.Char("Code"),
"amount": fields.Decimal("Amount"),
}
def onchange_amount(self, context={}):
data = context["data"]
print(data)
return data
Tax.register()
| {
"content_hash": "ca62674dd63fa7a9a22f9d7b31399663",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.5827814569536424,
"repo_name": "anastue/netforce",
"id": "2c64e2578367a111d8796c3ef50169c5aab8ead7",
"size": "1558",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable-3.1",
"path": "netforce_hr/netforce_hr/models/hr_payslip_tax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "CSS",
"bytes": "407336"
},
{
"name": "Groff",
"bytes": "15858"
},
{
"name": "HTML",
"bytes": "477928"
},
{
"name": "Java",
"bytes": "11870"
},
{
"name": "JavaScript",
"bytes": "3711952"
},
{
"name": "Makefile",
"bytes": "353"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3455528"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
"""
MIT License
Copyright (c) 2013-2016 Frantisek Uhrecky
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from GroupDialog import GroupDialog
from PyQt4 import QtCore
from TransController import tr
from GroupController import GroupController
import logging
import InfoMsgBoxes
class EditGroupDialog(GroupDialog):
def __init__(self, parent, g_id):
"""
Initialize group dailog for editting groups
@param parent: object containing _db_ctrl attribute, DB controller
@param g_id: group ID to edit
"""
self.__parent = parent
self.__group = None
self.loadGroup(g_id)
super(EditGroupDialog, self).__init__(parent._db_ctrl)
self.setGroup()
self.disableSaveButton()
def loadGroup(self, g_id):
"""
Load group with ID.
@param g_id: group ID
"""
logging.info("loading group with ID: '%i'", g_id)
group_ctrl = GroupController(self.__parent._db_ctrl)
self.__group = group_ctrl.selectById(g_id)
def setGroup(self):
"""
Set loaded group to inputs.
"""
# set names
self._name.setText(QtCore.QString.fromUtf8(tr(self.__group._name)))
self._desc.setText(QtCore.QString.fromUtf8(tr(self.__group._description)))
# load icons and set current icon
self.loadIcons(self.__group._icon._id)
def initUi(self):
"""
Initialize UI.
"""
GroupDialog.initUi(self)
self.setWindowTitle(QtCore.QString.fromUtf8(tr("Edit group")) + ": " + QtCore.QString.fromUtf8(tr(self.__group._name)))
def saveChanges(self):
"""
Method for saving changes into db.
"""
try:
group_ctrl = GroupController(self.__parent._db_ctrl)
# prepare data
self.__group._name = str(self._name.text().toUtf8())
self.__group._description = str(self._desc.text().toUtf8())
icon_id = self.getIconId()
group_ctrl.updateGroup(self.__group._id, self.__group._name, self.__group._description, icon_id)
self.accept()
except Exception as e:
logging.exception(e)
            InfoMsgBoxes.showErrorMsg(e)
| {
"content_hash": "c62810e44e06722b5d69af11b72a4fba",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 127,
"avg_line_length": 35.76288659793814,
"alnum_prop": 0.6140098010954166,
"repo_name": "FrUh/userpass-manager",
"id": "c92522ddf5cead9cdf4c592991adfaa4ce314449",
"size": "3511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EditGroupDialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "196510"
}
],
"symlink_target": ""
} |
'''
The Clients subpackage exists for one purpose - providing implementations
of the MACI Client IDL interface for Python. Most developers will only care
about looking at the SimpleClient module as it contains the documentation for
PySimpleClient, but the BaseClient module can be quite interesting also. The
final thing to note is developers should also look over the documentation for
the ContainerServices module in the Servants subpackage as PySimpleClient is
derived from an extremely useful class there.
'''
__revision__ = "$Id: __init__.py,v 1.4 2005/02/23 00:04:55 dfugate Exp $"
| {
"content_hash": "80cdb67c93cee9d47464a3dd743b7d61",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 77,
"avg_line_length": 58.8,
"alnum_prop": 0.7959183673469388,
"repo_name": "csrg-utfsm/acscb",
"id": "116a346b4f8f3023c81f6057a5e3082e47d4331c",
"size": "588",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "LGPL/CommonSoftware/acspy/src/Acspy/Clients/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "633"
},
{
"name": "Batchfile",
"bytes": "2346"
},
{
"name": "C",
"bytes": "751150"
},
{
"name": "C++",
"bytes": "7892598"
},
{
"name": "CSS",
"bytes": "21364"
},
{
"name": "Elixir",
"bytes": "906"
},
{
"name": "Emacs Lisp",
"bytes": "1990066"
},
{
"name": "FreeMarker",
"bytes": "7369"
},
{
"name": "GAP",
"bytes": "14867"
},
{
"name": "Gnuplot",
"bytes": "437"
},
{
"name": "HTML",
"bytes": "1857062"
},
{
"name": "Haskell",
"bytes": "764"
},
{
"name": "Java",
"bytes": "13573740"
},
{
"name": "JavaScript",
"bytes": "19058"
},
{
"name": "Lex",
"bytes": "5101"
},
{
"name": "Makefile",
"bytes": "1624406"
},
{
"name": "Module Management System",
"bytes": "4925"
},
{
"name": "Objective-C",
"bytes": "3223"
},
{
"name": "PLSQL",
"bytes": "9496"
},
{
"name": "Perl",
"bytes": "120411"
},
{
"name": "Python",
"bytes": "4191000"
},
{
"name": "Roff",
"bytes": "9920"
},
{
"name": "Shell",
"bytes": "1198375"
},
{
"name": "Smarty",
"bytes": "21615"
},
{
"name": "Tcl",
"bytes": "227078"
},
{
"name": "XSLT",
"bytes": "100454"
},
{
"name": "Yacc",
"bytes": "5006"
}
],
"symlink_target": ""
} |
from django.urls import reverse
from wakawaka.tests.base import BaseTestCase
class IndexTestCase(BaseTestCase):
"""
Index and WikiIndex tests.
"""
    def test_calling_home_redirects_to_wikiindex(self):
"""
Calling the homepage `/` will automatically redirect to the
`WikiIndex` index page.
"""
response = self.client.get(reverse('wakawaka_index'))
self.assertEqual(response.status_code, 302)
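        # Django < 1.9 returned absolute URLs in the Location header; newer
        # versions return a relative path, hence the version switch below.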
if self.is_django_18():
self.assertEqual(response['Location'], 'http://testserver/WikiIndex/')
else:
self.assertEqual(response['Location'], '/WikiIndex/')
def test_wikiindex_is_a_setting(self):
"""
        The homepage name `WikiIndex` can be overridden via a setting.
"""
with self.settings(WAKAWAKA_DEFAULT_INDEX='WikiWukuIndex'):
response = self.client.get(reverse('wakawaka_index'))
self.assertEqual(response.status_code, 302)
if self.is_django_18():
self.assertEqual(
response['Location'], 'http://testserver/WikiWukuIndex/'
)
else:
self.assertEqual(response['Location'], '/WikiWukuIndex/')
| {
"content_hash": "13ddeb8b168e73b1c249385567da2e46",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 82,
"avg_line_length": 33.2972972972973,
"alnum_prop": 0.5982142857142857,
"repo_name": "bartTC/django-wakawaka",
"id": "ffe9bae7c87dcd43eb7981dd18ae69b655813471",
"size": "1232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wakawaka/tests/test_index.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9973"
},
{
"name": "Python",
"bytes": "49533"
}
],
"symlink_target": ""
} |
import os
from twisted.cred.portal import Portal
from txweb2 import responsecode
from txweb2.auth import basic
from txweb2.stream import MemoryStream
from txweb2.dav.util import davXMLFromStream
from txweb2.dav.auth import TwistedPasswordProperty, IPrincipal, DavRealm, TwistedPropertyChecker, AuthenticationWrapper
from txweb2.dav.fileop import rmdir
from txweb2.test.test_server import SimpleRequest
from txweb2.dav.test.util import Site, serialize
from txweb2.dav.test.test_resource import \
TestDAVPrincipalResource, TestPrincipalsCollection
from txdav.xml import element
import txweb2.dav.test.util
class ACL(txweb2.dav.test.util.TestCase):
"""
RFC 3744 (WebDAV ACL) tests.
"""
def createDocumentRoot(self):
docroot = self.mktemp()
os.mkdir(docroot)
userResource = TestDAVPrincipalResource("/principals/users/user01")
userResource.writeDeadProperty(TwistedPasswordProperty("user01"))
principalCollection = TestPrincipalsCollection(
"/principals/",
children={
"users": TestPrincipalsCollection(
"/principals/users/",
children={"user01": userResource}
)
}
)
rootResource = self.resource_class(
docroot, principalCollections=(principalCollection,))
portal = Portal(DavRealm())
portal.registerChecker(TwistedPropertyChecker())
credentialFactories = (basic.BasicCredentialFactory(""),)
loginInterfaces = (IPrincipal,)
self.site = Site(AuthenticationWrapper(
rootResource,
portal,
credentialFactories,
credentialFactories,
loginInterfaces
))
rootResource.setAccessControlList(self.grant(element.All()))
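        # Fixture files are named after the privileges granted on them, so
        # each test can pair a resource name with the response it expects.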
for name, acl in (
("none", self.grant()),
("read", self.grant(element.Read())),
("read-write", self.grant(element.Read(), element.Write())),
("unlock", self.grant(element.Unlock())),
("all", self.grant(element.All())),
):
filename = os.path.join(docroot, name)
if not os.path.isfile(filename):
file(filename, "w").close()
resource = self.resource_class(filename)
resource.setAccessControlList(acl)
for name, acl in (
("nobind", self.grant()),
("bind", self.grant(element.Bind())),
("unbind", self.grant(element.Bind(), element.Unbind())),
):
dirname = os.path.join(docroot, name)
if not os.path.isdir(dirname):
os.mkdir(dirname)
resource = self.resource_class(dirname)
resource.setAccessControlList(acl)
return docroot
def restore(self):
# Get rid of whatever messed up state the test has now so that we'll
# get a fresh docroot. This isn't very cool; tests should be doing
# less so that they don't need a fresh copy of this state.
if hasattr(self, "_docroot"):
rmdir(self._docroot)
del self._docroot
def test_COPY_MOVE_source(self):
"""
Verify source access controls during COPY and MOVE.
"""
def work():
dst_path = os.path.join(self.docroot, "copy_dst")
dst_uri = "/" + os.path.basename(dst_path)
for src, status in (
("nobind", responsecode.FORBIDDEN),
("bind", responsecode.FORBIDDEN),
("unbind", responsecode.CREATED),
):
src_path = os.path.join(self.docroot, "src_" + src)
src_uri = "/" + os.path.basename(src_path)
if not os.path.isdir(src_path):
os.mkdir(src_path)
src_resource = self.resource_class(src_path)
src_resource.setAccessControlList({
"nobind": self.grant(),
"bind": self.grant(element.Bind()),
"unbind": self.grant(element.Bind(), element.Unbind())
}[src])
for name, acl in (
("none", self.grant()),
("read", self.grant(element.Read())),
("read-write", self.grant(element.Read(), element.Write())),
("unlock", self.grant(element.Unlock())),
("all", self.grant(element.All())),
):
filename = os.path.join(src_path, name)
if not os.path.isfile(filename):
file(filename, "w").close()
self.resource_class(filename).setAccessControlList(acl)
for method in ("COPY", "MOVE"):
for name, code in (
("none", {"COPY": responsecode.FORBIDDEN, "MOVE": status}[method]),
("read", {"COPY": responsecode.CREATED, "MOVE": status}[method]),
("read-write", {"COPY": responsecode.CREATED, "MOVE": status}[method]),
("unlock", {"COPY": responsecode.FORBIDDEN, "MOVE": status}[method]),
("all", {"COPY": responsecode.CREATED, "MOVE": status}[method]),
):
path = os.path.join(src_path, name)
uri = src_uri + "/" + name
request = SimpleRequest(self.site, method, uri)
request.headers.setHeader("destination", dst_uri)
_add_auth_header(request)
def test(response, code=code, path=path):
if os.path.isfile(dst_path):
os.remove(dst_path)
if response.code != code:
return self.oops(request, response, code, method, name)
yield (request, test)
return serialize(self.send, work())
def test_COPY_MOVE_dest(self):
"""
Verify destination access controls during COPY and MOVE.
"""
def work():
src_path = os.path.join(self.docroot, "read")
uri = "/" + os.path.basename(src_path)
for method in ("COPY", "MOVE"):
for name, code in (
("nobind", responsecode.FORBIDDEN),
("bind", responsecode.CREATED),
("unbind", responsecode.CREATED),
):
dst_parent_path = os.path.join(self.docroot, name)
dst_path = os.path.join(dst_parent_path, "dst")
request = SimpleRequest(self.site, method, uri)
request.headers.setHeader("destination", "/" + name + "/dst")
_add_auth_header(request)
def test(response, code=code, dst_path=dst_path):
if os.path.isfile(dst_path):
os.remove(dst_path)
if response.code != code:
return self.oops(request, response, code, method, name)
yield (request, test)
self.restore()
return serialize(self.send, work())
def test_DELETE(self):
"""
Verify access controls during DELETE.
"""
def work():
for name, code in (
("nobind", responsecode.FORBIDDEN),
("bind", responsecode.FORBIDDEN),
("unbind", responsecode.NO_CONTENT),
):
collection_path = os.path.join(self.docroot, name)
path = os.path.join(collection_path, "dst")
file(path, "w").close()
request = SimpleRequest(self.site, "DELETE", "/" + name + "/dst")
_add_auth_header(request)
def test(response, code=code, path=path):
if response.code != code:
return self.oops(request, response, code, "DELETE", name)
yield (request, test)
return serialize(self.send, work())
def test_UNLOCK(self):
"""
Verify access controls during UNLOCK of unowned lock.
"""
raise NotImplementedError()
test_UNLOCK.todo = "access controls on UNLOCK unimplemented"
    def test_MKCOL_PUT(self):
        """
        Verify access controls during MKCOL and PUT.
        """
        def work():
            for method in ("MKCOL", "PUT"):
                for name, code in (
                    ("nobind", responsecode.FORBIDDEN),
                    ("bind", responsecode.CREATED),
                    ("unbind", responsecode.CREATED),
                ):
                    collection_path = os.path.join(self.docroot, name)
                    path = os.path.join(collection_path, "dst")
                    if os.path.isfile(path):
                        os.remove(path)
                    elif os.path.isdir(path):
                        os.rmdir(path)
                    request = SimpleRequest(self.site, method, "/" + name + "/dst")
                    _add_auth_header(request)
                    def test(response, code=code, path=path):
                        if response.code != code:
                            return self.oops(request, response, code, method, name)
                    yield (request, test)
        return serialize(self.send, work())
def test_PUT_exists(self):
"""
Verify access controls during PUT of existing file.
"""
def work():
for name, code in (
("none", responsecode.FORBIDDEN),
("read", responsecode.FORBIDDEN),
("read-write", responsecode.NO_CONTENT),
("unlock", responsecode.FORBIDDEN),
("all", responsecode.NO_CONTENT),
):
path = os.path.join(self.docroot, name)
request = SimpleRequest(self.site, "PUT", "/" + name)
_add_auth_header(request)
def test(response, code=code, path=path):
if response.code != code:
return self.oops(request, response, code, "PUT", name)
yield (request, test)
return serialize(self.send, work())
def test_PROPFIND(self):
"""
Verify access controls during PROPFIND.
"""
raise NotImplementedError()
test_PROPFIND.todo = "access controls on PROPFIND unimplemented"
def test_PROPPATCH(self):
"""
Verify access controls during PROPPATCH.
"""
def work():
for name, code in (
("none", responsecode.FORBIDDEN),
("read", responsecode.FORBIDDEN),
("read-write", responsecode.MULTI_STATUS),
("unlock", responsecode.FORBIDDEN),
("all", responsecode.MULTI_STATUS),
):
path = os.path.join(self.docroot, name)
request = SimpleRequest(self.site, "PROPPATCH", "/" + name)
request.stream = MemoryStream(
element.WebDAVDocument(element.PropertyUpdate()).toxml()
)
_add_auth_header(request)
def test(response, code=code, path=path):
if response.code != code:
return self.oops(request, response, code, "PROPPATCH", name)
yield (request, test)
return serialize(self.send, work())
def test_GET_REPORT(self):
"""
Verify access controls during GET and REPORT.
"""
def work():
for method in ("GET", "REPORT"):
if method == "GET":
ok = responsecode.OK
elif method == "REPORT":
ok = responsecode.MULTI_STATUS
else:
raise AssertionError("We shouldn't be here. (method = %r)" % (method,))
for name, code in (
("none", responsecode.FORBIDDEN),
("read", ok),
("read-write", ok),
("unlock", responsecode.FORBIDDEN),
("all", ok),
):
path = os.path.join(self.docroot, name)
request = SimpleRequest(self.site, method, "/" + name)
if method == "REPORT":
request.stream = MemoryStream(element.PrincipalPropertySearch().toxml())
_add_auth_header(request)
def test(response, code=code, path=path):
if response.code != code:
return self.oops(request, response, code, method, name)
yield (request, test)
return serialize(self.send, work())
def oops(self, request, response, code, method, name):
def gotResponseData(doc):
if doc is None:
doc_xml = None
else:
doc_xml = doc.toxml()
def fail(acl):
self.fail("Incorrect status code %s (!= %s) for %s of resource %s with %s ACL: %s\nACL: %s"
% (response.code, code, method, request.uri, name, doc_xml, acl.toxml()))
def getACL(resource):
return resource.accessControlList(request)
d = request.locateResource(request.uri)
d.addCallback(getACL)
d.addCallback(fail)
return d
d = davXMLFromStream(response.stream)
d.addCallback(gotResponseData)
return d
def _add_auth_header(request):
request.headers.setHeader(
"authorization",
("basic", "user01:user01".encode("base64"))
)
| {
"content_hash": "43d514d9405a0f9089ba441a2ded3c2c",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 120,
"avg_line_length": 36.675461741424805,
"alnum_prop": 0.5077697841726618,
"repo_name": "macosforge/ccs-calendarserver",
"id": "75adc984dc05734a019d87c9f5624978213a2ba6",
"size": "15068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txweb2/dav/test/test_acl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import sys
import re
import git
import requests
from datetime import datetime
CHANGELOG = 'CHANGES'
API_URL = 'https://forge-allura.apache.org/rest/p/allura/tickets/search?limit=1000&q=ticket_num:({0})'
def main():
from_ref, to_ref, version = get_versions()
tickets, changes_without_tickets = get_tickets(from_ref, to_ref)
summaries = get_ticket_summaries(tickets)
print_changelog(version, summaries, changes_without_tickets)
def get_versions():
return sys.argv[1], sys.argv[2], sys.argv[3]
def get_tickets(from_ref, to_ref):
repo = git.Repo('.')
ticket_nums = set()
changes_without_tickets = []
ref_spec = '..'.join([from_ref, to_ref])
for commit in repo.iter_commits(ref_spec):
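        # Allura convention: commit summaries open with "[#1234]" when they
        # belong to a ticket; anything else is collected separately.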
match = re.match(r'\s*\[#(\d+)\]', commit.summary)
if match:
ticket_nums.add(match.group(1))
else:
changes_without_tickets.append(commit.summary)
return list(ticket_nums), changes_without_tickets
def get_ticket_summaries(tickets):
summaries = {}
r = requests.get(API_URL.format(' '.join(tickets)))
if r.status_code != 200:
raise ValueError('Unexpected response code: {0}'.format(r.status_code))
for ticket in r.json()['tickets']:
summaries[ticket['ticket_num']] = ticket['summary']
return summaries
def print_changelog(version, summaries, changes_without_tickets):
print 'Version {version} ({date})\n'.format(**{
'version': version,
'date': datetime.utcnow().strftime('%B %Y'),
})
for ticket in sorted(summaries.keys()):
print " * [#{0}] {1}".format(ticket, summaries[ticket].encode('utf-8'))
for change in changes_without_tickets:
print " * {}".format(change.encode('utf-8'))
if __name__ == '__main__':
main()
| {
"content_hash": "f84ef9081a83a754a1e2c35cb467562b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 102,
"avg_line_length": 30.775862068965516,
"alnum_prop": 0.6352941176470588,
"repo_name": "heiths/allura",
"id": "61aafddd65a02cf7cc321167417b08c27efcdbc2",
"size": "2678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/changelog.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "173671"
},
{
"name": "HTML",
"bytes": "751039"
},
{
"name": "JavaScript",
"bytes": "1136845"
},
{
"name": "Makefile",
"bytes": "7788"
},
{
"name": "Puppet",
"bytes": "6872"
},
{
"name": "Python",
"bytes": "4238265"
},
{
"name": "RAML",
"bytes": "26153"
},
{
"name": "Ruby",
"bytes": "7006"
},
{
"name": "Shell",
"bytes": "131827"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
from tkinter import *
class App:
def __init__(self, master):
frame = Frame(master)
frame.pack()
top_label = Label(frame, text="New AMC question")
top_label.grid(row=0, columnspan=3)
self.question_label = StringVar()
label_label = Label(frame, text="Question label:")
label_label.grid(row=1, sticky=W)
label_entry = Entry(frame, textvariable=self.question_label)
label_entry.grid(row=1, column=1, sticky=W)
self.question_text = StringVar()
question_label = Label(frame, text="Question text:")
question_label.grid(row=2, sticky=W)
question_entry = Entry(frame, textvariable=self.question_text)
question_entry.grid(row=2, column=1, sticky=W)
label_entry.focus_set()
self.correct_ans = IntVar()
self.ans_texts = []
ans_labels = []
ans_entries = []
ans_radios = []
rows_before_ans = 3
self.answer_count = 4
for n in range(self.answer_count):
self.ans_texts.append(StringVar())
ans_labels.append(Label(frame, text="Answer " + str(n + 1) + ":"))
ans_labels[n].grid(row=rows_before_ans + n, sticky=W)
ans_entries.append(Entry(frame, textvariable=self.ans_texts[n]))
ans_entries[n].grid(row=rows_before_ans + n, column=1, sticky=W)
ans_radios.append(Radiobutton(frame, variable=self.correct_ans,
value=n))
ans_radios[n].grid(row=rows_before_ans + n, column=2, sticky=W)
write_button = Button(frame, text="Generate question",
command=self.print_to_terminal)
write_button.grid(row=rows_before_ans + self.answer_count, columnspan=3)
def print_to_terminal(self):
print("\\begin{question}{" + self.question_label.get() + "}")
print(" " + self.question_text.get())
print(" \\begin{choices}")
marks = ["correct" if n == self.correct_ans.get() else "wrong" for n in
range(self.answer_count)]
for n in range(self.answer_count):
print(" \\" + marks[n] + "choice{" + self.ans_texts[n].get() + "}")
print(" \\end{choices}")
print("\\end{question}")
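# For reference, with hypothetical inputs -- label "q1", question text "2+2?",
# answers 3/4/5/6 and the second radio selected -- print_to_terminal emits
# AMC-style LaTeX like:
# \begin{question}{q1}
#   2+2?
#   \begin{choices}
#     \wrongchoice{3}
#     \correctchoice{4}
#     \wrongchoice{5}
#     \wrongchoice{6}
#   \end{choices}
# \end{question}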
root = Tk()
app = App(root)
root.mainloop()
| {
"content_hash": "895d1633c820ae411f51f5d73dd52601",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 30.823529411764707,
"alnum_prop": 0.6335877862595419,
"repo_name": "jarthurgross/amc_question_creator",
"id": "d8f84b20347d91db582bfeda8aa88ca38309baf0",
"size": "2096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "create_amc_question.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27591"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import dateparser
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DateParser'
copyright = u'2014, Scrapinghub'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = dateparser.__version__
# The full version, including alpha/beta/rc tags.
release = dateparser.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dateparserdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'dateparser.tex',
u'DateParser Documentation',
u'Scrapinghub', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dateparser',
u'DateParser Documentation',
[u'Scrapinghub'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dateparser',
u'DateParser Documentation',
u'Scrapinghub',
'dateparser',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "ff62cbee20bb11fe73d6cf9611b375fd",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 76,
"avg_line_length": 30.68076923076923,
"alnum_prop": 0.7036479879654005,
"repo_name": "seagatesoft/dateparser",
"id": "99d941e7d42c8cfc094d97e9caf9845ea90742cd",
"size": "8422",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1580"
},
{
"name": "Python",
"bytes": "165991"
}
],
"symlink_target": ""
} |
import re
import os
class VirtualFsHandler(object):
def usage(self):
return get_help()
def triage_message(self, message, client):
if message['type'] != 'stream':
return False
original_content = message['content']
return original_content.startswith('fs ')
def handle_message(self, message, client, state_handler):
assert self.triage_message(message, client)
original_content = message['content']
command = original_content[len('fs '):]
stream = message['display_recipient']
topic = message['subject']
state = state_handler.get_state()
if state is None:
state = {}
if stream not in state:
state[stream] = fs_new()
fs = state[stream]
fs, msg = fs_command(fs, command)
state[stream] = fs
state_handler.set_state(state)
client.send_message(dict(
type='stream',
to=stream,
subject=topic,
content=msg,
))
def get_help():
return '''
The "fs" commands implement a virtual file system for a stream.
File contents are persisted for the lifetime of the bot process;
if you rename a stream, its files are lost.
Example commands:
```
fs mkdir: create a directory
fs ls: list a directory
fs write: write text
fs read: read text
fs rm: remove a file
```
Use commands like `fs help write` for more details on specific
commands.
'''
def test():
fs = fs_new()
assert is_directory(fs, '/')
for cmd, expected_response in sample_conversation():
fs, msg = fs_command(fs, cmd)
if msg != expected_response:
raise AssertionError('''
cmd: %s
expected: %s
but got : %s
''' % (cmd, expected_response, msg))
def sample_conversation():
return [
('write /foo contents of /foo', 'file written'),
('read /foo', 'contents of /foo'),
('write /bar Contents: bar bar', 'file written'),
('read /bar', 'Contents: bar bar'),
('write /bar invalid', 'ERROR: file already exists'),
('rm /bar', 'removed'),
('rm /bar', 'ERROR: file does not exist'),
('write /bar new bar', 'file written'),
('read /bar', 'new bar'),
('write /yo/invalid whatever', 'ERROR: /yo is not a directory'),
('mkdir /yo', 'directory created'),
('read /yo', 'ERROR: /yo is a directory, file required'),
('ls /yo', 'WARNING: directory is empty'),
('read /yo/nada', 'ERROR: file does not exist'),
('write /yo whatever', 'ERROR: file already exists'),
('write /yo/apple red', 'file written'),
('read /yo/apple', 'red'),
('mkdir /yo/apple', 'ERROR: file already exists'),
('ls /invalid', 'ERROR: file does not exist'),
('ls /foo', 'ERROR: /foo is not a directory'),
('ls /', '* /bar\n* /foo\n* /yo'),
('invalid command', 'ERROR: unrecognized command'),
('write', 'ERROR: syntax: write <path> <some_text>'),
('help', get_help()),
('help ls', 'syntax: ls <path>'),
('help invalid_command', get_help()),
]
REGEXES = dict(
command='(ls|mkdir|read|rm|write)',
    path=r'(\S+)',
some_text='(.+)',
)
def get_commands():
return {
'help': (fs_help, ['command']),
'ls': (fs_ls, ['path']),
'mkdir': (fs_mkdir, ['path']),
'read': (fs_read, ['path']),
'rm': (fs_rm, ['path']),
'write': (fs_write, ['path', 'some_text']),
}
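# Dispatch glues the command name to one regex per declared argument (from
# REGEXES) and matches the whole input line; the captured groups become the
# positional arguments of the handler function.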
def fs_command(fs, cmd):
if cmd.strip() == 'help':
return fs, get_help()
cmd_name = cmd.split()[0]
commands = get_commands()
if cmd_name not in commands:
return fs, 'ERROR: unrecognized command'
f, arg_names = commands[cmd_name]
partial_regexes = [cmd_name] + [REGEXES[a] for a in arg_names]
regex = ' '.join(partial_regexes)
m = re.match(regex, cmd)
if m:
return f(fs, *m.groups())
elif cmd_name == 'help':
return fs, get_help()
else:
return fs, 'ERROR: ' + syntax_help(cmd_name)
def syntax_help(cmd_name):
commands = get_commands()
f, arg_names = commands[cmd_name]
arg_syntax = ' '.join('<' + a + '>' for a in arg_names)
return 'syntax: %s %s' % (cmd_name, arg_syntax)
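# For reference, the helper above yields strings such as:
#   syntax_help('write') == 'syntax: write <path> <some_text>'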
def fs_new():
fs = {
'/': directory([])
}
return fs
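# Shape of a freshly-created filesystem, for reference:
#   fs_new() == {'/': {'kind': 'dir', 'fns': set()}}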
def fs_help(fs, cmd_name):
return fs, syntax_help(cmd_name)
def fs_mkdir(fs, fn):
if fn in fs:
return fs, 'ERROR: file already exists'
dir_path = os.path.dirname(fn)
if not is_directory(fs, dir_path):
msg = 'ERROR: %s is not a directory' % (dir_path,)
return fs, msg
new_fs = fs.copy()
new_dir = directory({fn}.union(fs[dir_path]['fns']))
new_fs[dir_path] = new_dir
new_fs[fn] = directory([])
msg = 'directory created'
return new_fs, msg
def fs_ls(fs, fn):
if fn not in fs:
msg = 'ERROR: file does not exist'
return fs, msg
if not is_directory(fs, fn):
return fs, 'ERROR: %s is not a directory' % (fn,)
fns = fs[fn]['fns']
if not fns:
return fs, 'WARNING: directory is empty'
msg = '\n'.join('* ' + fn for fn in sorted(fns))
return fs, msg
def fs_rm(fs, fn):
if fn not in fs:
msg = 'ERROR: file does not exist'
return fs, msg
new_fs = fs.copy()
new_fs.pop(fn)
msg = 'removed'
return new_fs, msg
def fs_write(fs, fn, content):
if fn in fs:
msg = 'ERROR: file already exists'
return fs, msg
dir_path = os.path.dirname(fn)
if not is_directory(fs, dir_path):
msg = 'ERROR: %s is not a directory' % (dir_path,)
return fs, msg
new_fs = fs.copy()
new_dir = directory({fn}.union(fs[dir_path]['fns']))
new_fs[dir_path] = new_dir
new_fs[fn] = text_file(content)
msg = 'file written'
return new_fs, msg
def fs_read(fs, fn):
if fn not in fs:
msg = 'ERROR: file does not exist'
return fs, msg
if fs[fn]['kind'] == 'dir':
msg = 'ERROR: {} is a directory, file required'.format(fn)
return fs, msg
val = fs[fn]['content']
return fs, val
def directory(fns):
return dict(kind='dir', fns=set(fns))
def text_file(content):
return dict(kind='text', content=content)
def is_directory(fs, fn):
if fn not in fs:
return False
return fs[fn]['kind'] == 'dir'
handler_class = VirtualFsHandler
if __name__ == '__main__':
# We eventually want to test bots with a "real" testing
# framework.
test()
| {
"content_hash": "8d0dc08f187bc0b9aca18377da3f4cb1",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 72,
"avg_line_length": 28.463519313304722,
"alnum_prop": 0.5553377563329313,
"repo_name": "Diptanshu8/zulip",
"id": "9df542e4c0a2a50ca76b7dbf3ddfef6882adbcbe",
"size": "6688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib_bots/lib/virtual_fs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "241493"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "459622"
},
{
"name": "JavaScript",
"bytes": "1470745"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "82312"
},
{
"name": "Python",
"bytes": "2932435"
},
{
"name": "Ruby",
"bytes": "249748"
},
{
"name": "Shell",
"bytes": "35313"
}
],
"symlink_target": ""
} |
import math
from klampt import *
from klampt.glprogram import *
from klampt import vectorops
from klampt import so3
#import numpy as np
class GLIKTest(GLRealtimeProgram):
def __init__(self,world):
GLRealtimeProgram.__init__(self,"GLIKTest")
self.world = world
self.position = (0,0,0)
def display(self):
#draw the world
self.world.drawGL()
#draw the end-effector position and the desired position
link = 7
localpos = (0.17,0,0)
robot = self.world.robot(0)
eepos = robot.link(link).getWorldPosition(localpos)
glDisable(GL_LIGHTING)
glDisable(GL_DEPTH_TEST)
glPointSize(5.0)
glEnable(GL_POINT_SMOOTH)
glBegin(GL_POINTS)
glColor3f(1,1,0)
glVertex3fv(eepos)
glColor3f(1,1,1)
glVertex3fv(self.position)
glEnd()
glEnable(GL_DEPTH_TEST)
def solve_ik(self,robotlink,localpos,worldpos):
"""IMPLEMENT ME: solve inverse kinematics to place the 3D point
localpos on robotlink (a RobotLink instance) at the 3D position
worldpos in the world coordinate frame.
Returns the robot configuration that solves the IK problem.
"""
linkindex = robotlink.index
robot = robotlink.robot()
#hint: your code should look like this
#obj = ik.objective(robotlink,...)
# # In the ... you should do something to set up the objective so
# # that the point localpos on the link is matched to worldpos.
# # See klampt/ik.py for more details.
#s = ik.solver(obj)
# # Set up some parameters for the numerical solver
#maxIters = 100
#tol = 1e-3
# # Optionally you can set an initial configuration like so:
# # robot.setConfig([0]*robot.numLinks())
# # or set a random initial configuration like so:
# # s.sampleInitial()
#(res,iters) = s.solve(maxIters,tol);
#return robot.getConfig()
#right now this just sets the zero configuration
q = [0]*robot.numLinks()
return q
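    # A hedged sketch of one possible solution, following the hints above
    # (assumes `from klampt import ik` is available; untested here):
    #   obj = ik.objective(robotlink, local=[localpos], world=[worldpos])
    #   s = ik.solver(obj)
    #   s.sampleInitial()
    #   (res, iters) = s.solve(100, 1e-3)
    #   return robot.getConfig()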
def idle(self):
#set the desired position self.position to move in a circle
r = 0.4
self.position = (0.8,r*math.cos(self.ttotal),0.7+r*math.sin(self.ttotal))
link = 7
localpos = (0.17,0,0)
robot = self.world.robot(0)
q = self.solve_ik(robot.link(link),localpos,self.position)
robot.setConfig(q)
if __name__ == "__main__":
world = WorldModel()
res = world.readFile("ex2_file.xml")
if not res: raise RuntimeError("Unable to load world file")
GLIKTest(world).run()
| {
"content_hash": "49128b3399a1633af7e99b608812a3a3",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 81,
"avg_line_length": 34.5125,
"alnum_prop": 0.5900036218761319,
"repo_name": "bbgw/Klampt",
"id": "6cc1fe90b496cd0a26f21c20976c637ca4bba976",
"size": "2761",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/exercises/ik/ex2.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4484"
},
{
"name": "C++",
"bytes": "4479181"
},
{
"name": "CMake",
"bytes": "56429"
},
{
"name": "GLSL",
"bytes": "56"
},
{
"name": "Makefile",
"bytes": "5465"
},
{
"name": "Python",
"bytes": "1104503"
},
{
"name": "QMake",
"bytes": "3587"
},
{
"name": "Shell",
"bytes": "283"
}
],
"symlink_target": ""
} |
import sublime_plugin
class RenameFileCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.view.window().run_command("rename_path", {"paths": [self.view.file_name()]})
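# Usage sketch: Sublime Text derives the command name "rename_file" from the
# class name, so an illustrative key binding in a .sublime-keymap file is:
#   { "keys": ["f2"], "command": "rename_file" }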
| {
"content_hash": "4d684243a611078b32baf64af852f3cf",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 83,
"avg_line_length": 30.5,
"alnum_prop": 0.73224043715847,
"repo_name": "Loamhoof/sublime-plugins-dump",
"id": "a30ba7418206a2e196810a113d053fd76dc2c89d",
"size": "183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RenameFile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15370"
}
],
"symlink_target": ""
} |
from celery import task
@task(max_retries=3)
def dummy_task(arg):
return arg + arg
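# Usage sketch (assumes a configured broker and result backend):
#   dummy_task.delay(21).get() == 42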
| {
"content_hash": "8e3cd24588a7d45ea76b5cbc0b6e9189",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 23,
"avg_line_length": 14.833333333333334,
"alnum_prop": 0.6966292134831461,
"repo_name": "strycore/djung",
"id": "085a3e1d3803aece9859ff2170c780da492f5e5a",
"size": "89",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "core/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "8234"
},
{
"name": "JavaScript",
"bytes": "730"
},
{
"name": "Makefile",
"bytes": "718"
},
{
"name": "Python",
"bytes": "12695"
},
{
"name": "Shell",
"bytes": "1086"
}
],
"symlink_target": ""
} |
"""Wrappers for various units of text, including the main
:class:`TextBlob <textblob.blob.TextBlob>`, :class:`Word <textblob.blob.Word>`,
and :class:`WordList <textblob.blob.WordList>` classes.
Example usage: ::
>>> from textblob import TextBlob
>>> b = TextBlob("Simple is better than complex.")
>>> b.tags
[(u'Simple', u'NN'), (u'is', u'VBZ'), (u'better', u'JJR'), (u'than', u'IN'), (u'complex', u'NN')]
>>> b.noun_phrases
WordList([u'simple'])
>>> b.words
WordList([u'Simple', u'is', u'better', u'than', u'complex'])
>>> b.sentiment
(0.06666666666666667, 0.41904761904761906)
>>> b.words[0].synsets()[0]
Synset('simple.n.01')
.. versionchanged:: 0.8.0
These classes are now imported from ``textblob`` rather than ``text.blob``.
"""
from __future__ import unicode_literals, absolute_import
import sys
import json
from collections import defaultdict
import nltk
from textblob.decorators import cached_property, requires_nltk_corpus
from textblob.utils import lowerstrip, PUNCTUATION_REGEX
from textblob.inflect import singularize as _singularize, pluralize as _pluralize
from textblob.mixins import BlobComparableMixin, StringlikeMixin
from textblob.compat import unicode, basestring
from textblob.base import (BaseNPExtractor, BaseTagger, BaseTokenizer,
BaseSentimentAnalyzer, BaseParser)
from textblob.np_extractors import FastNPExtractor
from textblob.taggers import NLTKTagger
from textblob.tokenizers import WordTokenizer, sent_tokenize, word_tokenize
from textblob.sentiments import PatternAnalyzer
from textblob.parsers import PatternParser
from textblob.translate import Translator
from textblob.en import suggest
# Wordnet interface
# NOTE: textblob.wordnet is not imported so that the wordnet corpus can be lazy-loaded
_wordnet = nltk.corpus.wordnet
def _penn_to_wordnet(tag):
"""Converts a Penn corpus tag into a Wordnet tag."""
if tag in ("NN", "NNS", "NNP", "NNPS"):
return _wordnet.NOUN
if tag in ("JJ", "JJR", "JJS"):
return _wordnet.ADJ
if tag in ("VB", "VBD", "VBG", "VBN", "VBP", "VBZ"):
return _wordnet.VERB
if tag in ("RB", "RBR", "RBS"):
return _wordnet.ADV
return None
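# For example, _penn_to_wordnet('VBD') == _wordnet.VERB, while tags outside
# the four groups above (e.g. 'CD') map to None.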
class Word(unicode):
"""A simple word representation. Includes methods for inflection,
translation, and WordNet integration.
"""
translator = Translator()
def __new__(cls, string, pos_tag=None):
"""Return a new instance of the class. It is necessary to override
this method in order to handle the extra pos_tag argument in the
constructor.
"""
return super(Word, cls).__new__(cls, string)
def __init__(self, string, pos_tag=None):
self.string = string
self.pos_tag = pos_tag
def __repr__(self):
return repr(self.string)
def __str__(self):
return self.string
def singularize(self):
"""Return the singular version of the word as a string."""
return Word(_singularize(self.string))
def pluralize(self):
'''Return the plural version of the word as a string.'''
return Word(_pluralize(self.string))
def translate(self, from_lang='auto', to="en"):
'''Translate the word to another language using Google's
Translate API.
.. versionadded:: 0.5.0
'''
return self.translator.translate(self.string,
from_lang=from_lang, to_lang=to)
def detect_language(self):
'''Detect the word's language using Google's Translate API.
.. versionadded:: 0.5.0
'''
return self.translator.detect(self.string)
def spellcheck(self):
'''Return a list of (word, confidence) tuples of spelling corrections.
Based on: Peter Norvig, "How to Write a Spelling Corrector"
(http://norvig.com/spell-correct.html) as implemented in the pattern
library.
.. versionadded:: 0.6.0
'''
return suggest(self.string)
def correct(self):
'''Correct the spelling of the word. Returns the word with the highest
confidence using the spelling corrector.
.. versionadded:: 0.6.0
'''
return Word(self.spellcheck()[0][0])
@cached_property
@requires_nltk_corpus
def lemma(self):
"""Return the lemma of this word using Wordnet's morphy function.
"""
tag = _penn_to_wordnet(self.pos_tag) if (self.pos_tag is not None) else None
return self.lemmatize(pos=tag)
@requires_nltk_corpus
def lemmatize(self, pos=None):
"""Return the lemma for a word using WordNet's morphy function.
:param pos: Part of speech to filter upon. If `None`, defaults to
``_wordnet.NOUN``.
.. versionadded:: 0.8.1
"""
if pos is None:
pos = _wordnet.NOUN
lemmatizer = nltk.stem.WordNetLemmatizer()
return lemmatizer.lemmatize(self.string, pos)
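    # Example (assumes the WordNet corpus has been downloaded):
    #   Word('went', pos_tag='VBD').lemma == 'go'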
PorterStemmer = nltk.stem.porter.PorterStemmer()
LancasterStemmer = nltk.stem.lancaster.LancasterStemmer()
SnowballStemmer = nltk.stem.snowball.SnowballStemmer("english")
    # 'stem' is added along the same lines as 'lemmatize', using NLTK stemmers.
def stem(self, stemmer=PorterStemmer):
"""Stem a word using various NLTK stemmers. (Default: Porter Stemmer)
.. versionadded:: 0.12.0
"""
return stemmer.stem(self.string)
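    # Example: Word('running').stem() == 'run' with the default Porter stemmer.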
@cached_property
def synsets(self):
"""The list of Synset objects for this Word.
:rtype: list of Synsets
.. versionadded:: 0.7.0
"""
return self.get_synsets(pos=None)
@cached_property
def definitions(self):
"""The list of definitions for this word. Each definition corresponds
to a synset.
.. versionadded:: 0.7.0
"""
return self.define(pos=None)
def get_synsets(self, pos=None):
"""Return a list of Synset objects for this word.
:param pos: A part-of-speech tag to filter upon. If ``None``, all
synsets for all parts of speech will be loaded.
:rtype: list of Synsets
.. versionadded:: 0.7.0
"""
return _wordnet.synsets(self.string, pos)
def define(self, pos=None):
"""Return a list of definitions for this word. Each definition
corresponds to a synset for this word.
:param pos: A part-of-speech tag to filter upon. If ``None``, definitions
for all parts of speech will be loaded.
:rtype: List of strings
.. versionadded:: 0.7.0
"""
return [syn.definition() for syn in self.get_synsets(pos=pos)]
class WordList(list):
"""A list-like collection of words."""
def __init__(self, collection):
"""Initialize a WordList. Takes a collection of strings as
its only argument.
"""
self._collection = [Word(w) for w in collection]
super(WordList, self).__init__(self._collection)
def __str__(self):
return str(self._collection)
def __repr__(self):
"""Returns a string representation for debugging."""
class_name = self.__class__.__name__
return '{cls}({lst})'.format(cls=class_name, lst=repr(self._collection))
def __getitem__(self, key):
"""Returns a string at the given index."""
if isinstance(key, slice):
return self.__class__(self._collection[key])
else:
return self._collection[key]
def __getslice__(self, i, j):
# This is included for Python 2.* compatibility
return self.__class__(self._collection[i:j])
def __iter__(self):
return iter(self._collection)
def count(self, strg, case_sensitive=False, *args, **kwargs):
"""Get the count of a word or phrase `s` within this WordList.
:param strg: The string to count.
:param case_sensitive: A boolean, whether or not the search is case-sensitive.
"""
if not case_sensitive:
return [word.lower() for word in self].count(strg.lower(), *args,
**kwargs)
return self._collection.count(strg, *args, **kwargs)
def append(self, obj):
"""Append an object to end. If the object is a string, appends a
:class:`Word <Word>` object.
"""
if isinstance(obj, basestring):
return self._collection.append(Word(obj))
else:
return self._collection.append(obj)
def extend(self, iterable):
"""Extend WordList by appending elements from ``iterable``. If an element
is a string, appends a :class:`Word <Word>` object.
"""
        for e in iterable:
            self._collection.append(Word(e) if isinstance(e, basestring) else e)
return self
def upper(self):
"""Return a new WordList with each word upper-cased."""
return self.__class__([word.upper() for word in self])
def lower(self):
"""Return a new WordList with each word lower-cased."""
return self.__class__([word.lower() for word in self])
def singularize(self):
"""Return the single version of each word in this WordList."""
return self.__class__([word.singularize() for word in self])
def pluralize(self):
"""Return the plural version of each word in this WordList."""
return self.__class__([word.pluralize() for word in self])
def lemmatize(self):
"""Return the lemma of each word in this WordList."""
return self.__class__([word.lemmatize() for word in self])
def stem(self, *args, **kwargs):
"""Return the stem for each word in this WordList."""
return self.__class__([word.stem(*args, **kwargs) for word in self])
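# A short usage sketch:
#   WordList(['cats', 'dogs']).singularize() == WordList(['cat', 'dog'])
#   WordList(['cat', 'dog']).pluralize() == WordList(['cats', 'dogs'])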
def _validated_param(obj, name, base_class, default, base_class_name=None):
"""Validates a parameter passed to __init__. Makes sure that obj is
the correct class. Return obj if it's not None or falls back to default
:param obj: The object passed in.
:param name: The name of the parameter.
:param base_class: The class that obj must inherit from.
:param default: The default object to fall back upon if obj is None.
"""
base_class_name = base_class_name if base_class_name else base_class.__name__
if obj is not None and not isinstance(obj, base_class):
raise ValueError('{name} must be an instance of {cls}'
.format(name=name, cls=base_class_name))
return obj or default
def _initialize_models(obj, tokenizer, pos_tagger,
np_extractor, analyzer, parser, classifier):
"""Common initialization between BaseBlob and Blobber classes."""
# tokenizer may be a textblob or an NLTK tokenizer
obj.tokenizer = _validated_param(tokenizer, "tokenizer",
base_class=(BaseTokenizer, nltk.tokenize.api.TokenizerI),
default=BaseBlob.tokenizer,
base_class_name="BaseTokenizer")
obj.np_extractor = _validated_param(np_extractor, "np_extractor",
base_class=BaseNPExtractor,
default=BaseBlob.np_extractor)
obj.pos_tagger = _validated_param(pos_tagger, "pos_tagger",
BaseTagger, BaseBlob.pos_tagger)
obj.analyzer = _validated_param(analyzer, "analyzer",
BaseSentimentAnalyzer, BaseBlob.analyzer)
obj.parser = _validated_param(parser, "parser", BaseParser, BaseBlob.parser)
obj.classifier = classifier
class BaseBlob(StringlikeMixin, BlobComparableMixin):
"""An abstract base class that all textblob classes will inherit from.
Includes words, POS tag, NP, and word count properties. Also includes
basic dunder and string methods for making objects like Python strings.
:param text: A string.
:param tokenizer: (optional) A tokenizer instance. If ``None``,
defaults to :class:`WordTokenizer() <textblob.tokenizers.WordTokenizer>`.
:param np_extractor: (optional) An NPExtractor instance. If ``None``,
defaults to :class:`FastNPExtractor() <textblob.en.np_extractors.FastNPExtractor>`.
:param pos_tagger: (optional) A Tagger instance. If ``None``,
defaults to :class:`NLTKTagger <textblob.en.taggers.NLTKTagger>`.
:param analyzer: (optional) A sentiment analyzer. If ``None``,
defaults to :class:`PatternAnalyzer <textblob.en.sentiments.PatternAnalyzer>`.
:param parser: A parser. If ``None``, defaults to
:class:`PatternParser <textblob.en.parsers.PatternParser>`.
:param classifier: A classifier.
.. versionchanged:: 0.6.0
``clean_html`` parameter deprecated, as it was in NLTK.
"""
np_extractor = FastNPExtractor()
pos_tagger = NLTKTagger()
tokenizer = WordTokenizer()
translator = Translator()
analyzer = PatternAnalyzer()
parser = PatternParser()
def __init__(self, text, tokenizer=None,
pos_tagger=None, np_extractor=None, analyzer=None,
parser=None, classifier=None, clean_html=False):
if not isinstance(text, basestring):
raise TypeError('The `text` argument passed to `__init__(text)` '
'must be a string, not {0}'.format(type(text)))
if clean_html:
raise NotImplementedError("clean_html has been deprecated. "
"To remove HTML markup, use BeautifulSoup's "
"get_text() function")
self.raw = self.string = text
self.stripped = lowerstrip(self.raw, all=True)
_initialize_models(self, tokenizer, pos_tagger, np_extractor, analyzer,
parser, classifier)
@cached_property
def words(self):
"""Return a list of word tokens. This excludes punctuation characters.
If you want to include punctuation characters, access the ``tokens``
property.
:returns: A :class:`WordList <WordList>` of word tokens.
"""
return WordList(word_tokenize(self.raw, include_punc=False))
@cached_property
def tokens(self):
"""Return a list of tokens, using this blob's tokenizer object
(defaults to :class:`WordTokenizer <textblob.tokenizers.WordTokenizer>`).
"""
return WordList(self.tokenizer.tokenize(self.raw))
def tokenize(self, tokenizer=None):
"""Return a list of tokens, using ``tokenizer``.
:param tokenizer: (optional) A tokenizer object. If None, defaults to
this blob's default tokenizer.
"""
t = tokenizer if tokenizer is not None else self.tokenizer
return WordList(t.tokenize(self.raw))
def parse(self, parser=None):
"""Parse the text.
:param parser: (optional) A parser instance. If ``None``, defaults to
this blob's default parser.
.. versionadded:: 0.6.0
"""
p = parser if parser is not None else self.parser
return p.parse(self.raw)
def classify(self):
"""Classify the blob using the blob's ``classifier``."""
if self.classifier is None:
raise NameError("This blob has no classifier. Train one first!")
return self.classifier.classify(self.raw)
@cached_property
def sentiment(self):
"""Return a tuple of form (polarity, subjectivity ) where polarity
is a float within the range [-1.0, 1.0] and subjectivity is a float
within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is
very subjective.
:rtype: namedtuple of the form ``Sentiment(polarity, subjectivity)``
"""
return self.analyzer.analyze(self.raw)
@cached_property
def polarity(self):
"""Return the polarity score as a float within the range [-1.0, 1.0]
:rtype: float
"""
return PatternAnalyzer().analyze(self.raw)[0]
@cached_property
def subjectivity(self):
"""Return the subjectivity score as a float within the range [0.0, 1.0]
where 0.0 is very objective and 1.0 is very subjective.
:rtype: float
"""
return PatternAnalyzer().analyze(self.raw)[1]
@cached_property
def noun_phrases(self):
"""Returns a list of noun phrases for this blob."""
return WordList([phrase.strip().lower()
for phrase in self.np_extractor.extract(self.raw)
if len(phrase) > 1])
@cached_property
def pos_tags(self):
"""Returns an list of tuples of the form (word, POS tag).
Example:
::
[('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'),
('Thursday', 'NNP'), ('morning', 'NN')]
:rtype: list of tuples
"""
return [(Word(word, pos_tag=t), unicode(t))
for word, t in self.pos_tagger.tag(self.raw)
if not PUNCTUATION_REGEX.match(unicode(t))]
tags = pos_tags
@cached_property
def word_counts(self):
"""Dictionary of word frequencies in this text.
"""
counts = defaultdict(int)
stripped_words = [lowerstrip(word) for word in self.words]
for word in stripped_words:
counts[word] += 1
return counts
@cached_property
def np_counts(self):
"""Dictionary of noun phrase frequencies in this text.
"""
counts = defaultdict(int)
for phrase in self.noun_phrases:
counts[phrase] += 1
return counts
def ngrams(self, n=3):
"""Return a list of n-grams (tuples of n successive words) for this
blob.
:rtype: List of :class:`WordLists <WordList>`
"""
if n <= 0:
return []
grams = [WordList(self.words[i:i + n])
for i in range(len(self.words) - n + 1)]
return grams
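    # For instance, TextBlob("one two three").ngrams(2) yields
    # [WordList(['one', 'two']), WordList(['two', 'three'])].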
def translate(self, from_lang="auto", to="en"):
"""Translate the blob to another language.
Uses the Google Translate API. Returns a new TextBlob.
Requires an internet connection.
Usage:
::
>>> b = TextBlob("Simple is better than complex")
>>> b.translate(to="es")
TextBlob('Lo simple es mejor que complejo')
Language code reference:
https://developers.google.com/translate/v2/using_rest#language-params
        .. versionadded:: 0.5.0
:param str from_lang: Language to translate from. If ``None``, will attempt
to detect the language.
:param str to: Language to translate to.
:rtype: :class:`BaseBlob <BaseBlob>`
"""
return self.__class__(self.translator.translate(self.raw,
from_lang=from_lang, to_lang=to))
def detect_language(self):
"""Detect the blob's language using the Google Translate API.
Requires an internet connection.
Usage:
::
>>> b = TextBlob("bonjour")
>>> b.detect_language()
u'fr'
Language code reference:
https://developers.google.com/translate/v2/using_rest#language-params
.. versionadded:: 0.5.0
:rtype: str
"""
return self.translator.detect(self.raw)
def correct(self):
"""Attempt to correct the spelling of a blob.
.. versionadded:: 0.6.0
:rtype: :class:`BaseBlob <BaseBlob>`
"""
# regex matches: word or punctuation or whitespace
        tokens = nltk.tokenize.regexp_tokenize(self.raw, r"\w+|[^\w\s]|\s")
corrected = (Word(w).correct() for w in tokens)
ret = ''.join(corrected)
return self.__class__(ret)
def _cmpkey(self):
"""Key used by ComparableMixin to implement all rich comparison
operators.
"""
return self.raw
def _strkey(self):
"""Key used by StringlikeMixin to implement string methods."""
return self.raw
def __hash__(self):
return hash(self._cmpkey())
def __add__(self, other):
'''Concatenates two text objects the same way Python strings are
concatenated.
Arguments:
- `other`: a string or a text object
'''
if isinstance(other, basestring):
return self.__class__(self.raw + other)
elif isinstance(other, BaseBlob):
return self.__class__(self.raw + other.raw)
else:
raise TypeError('Operands must be either strings or {0} objects'
.format(self.__class__.__name__))
def split(self, sep=None, maxsplit=sys.maxsize):
"""Behaves like the built-in str.split() except returns a
WordList.
:rtype: :class:`WordList <WordList>`
"""
return WordList(self._strkey().split(sep, maxsplit))
class TextBlob(BaseBlob):
"""A general text block, meant for larger bodies of text (esp. those
containing sentences). Inherits from :class:`BaseBlob <BaseBlob>`.
:param str text: A string.
:param tokenizer: (optional) A tokenizer instance. If ``None``, defaults to
:class:`WordTokenizer() <textblob.tokenizers.WordTokenizer>`.
:param np_extractor: (optional) An NPExtractor instance. If ``None``,
defaults to :class:`FastNPExtractor() <textblob.en.np_extractors.FastNPExtractor>`.
:param pos_tagger: (optional) A Tagger instance. If ``None``, defaults to
:class:`NLTKTagger <textblob.en.taggers.NLTKTagger>`.
:param analyzer: (optional) A sentiment analyzer. If ``None``, defaults to
:class:`PatternAnalyzer <textblob.en.sentiments.PatternAnalyzer>`.
:param classifier: (optional) A classifier.
"""
@cached_property
def sentences(self):
"""Return list of :class:`Sentence <Sentence>` objects."""
return self._create_sentence_objects()
@cached_property
def words(self):
"""Return a list of word tokens. This excludes punctuation characters.
If you want to include punctuation characters, access the ``tokens``
property.
:returns: A :class:`WordList <WordList>` of word tokens.
"""
return WordList(word_tokenize(self.raw, include_punc=False))
@property
def raw_sentences(self):
"""List of strings, the raw sentences in the blob."""
return [sentence.raw for sentence in self.sentences]
@property
def serialized(self):
"""Returns a list of each sentence's dict representation."""
return [sentence.dict for sentence in self.sentences]
def to_json(self, *args, **kwargs):
'''Return a json representation (str) of this blob.
Takes the same arguments as json.dumps.
.. versionadded:: 0.5.1
'''
return json.dumps(self.serialized, *args, **kwargs)
@property
def json(self):
'''The json representation of this blob.
.. versionchanged:: 0.5.1
Made ``json`` a property instead of a method to restore backwards
compatibility that was broken after version 0.4.0.
'''
return self.to_json()
def _create_sentence_objects(self):
'''Returns a list of Sentence objects from the raw text.
'''
sentence_objects = []
sentences = sent_tokenize(self.raw)
char_index = 0 # Keeps track of character index within the blob
for sent in sentences:
# Compute the start and end indices of the sentence
# within the blob
start_index = self.raw.index(sent, char_index)
char_index += len(sent)
end_index = start_index + len(sent)
# Sentences share the same models as their parent blob
s = Sentence(sent, start_index=start_index, end_index=end_index,
tokenizer=self.tokenizer, np_extractor=self.np_extractor,
pos_tagger=self.pos_tagger, analyzer=self.analyzer,
parser=self.parser, classifier=self.classifier)
sentence_objects.append(s)
return sentence_objects
class Sentence(BaseBlob):
"""A sentence within a TextBlob. Inherits from :class:`BaseBlob <BaseBlob>`.
:param sentence: A string, the raw sentence.
:param start_index: An int, the index where this sentence begins
in a TextBlob. If not given, defaults to 0.
:param end_index: An int, the index where this sentence ends in
a TextBlob. If not given, defaults to the
length of the sentence - 1.
"""
def __init__(self, sentence, start_index=0, end_index=None, *args, **kwargs):
super(Sentence, self).__init__(sentence, *args, **kwargs)
#: The start index within a TextBlob
self.start = self.start_index = start_index
        #: The end index within a TextBlob
self.end = self.end_index = end_index or len(sentence) - 1
@property
def dict(self):
'''The dict representation of this sentence.'''
return {
'raw': self.raw,
'start_index': self.start_index,
'end_index': self.end_index,
'stripped': self.stripped,
'noun_phrases': self.noun_phrases,
'polarity': self.polarity,
'subjectivity': self.subjectivity,
}
class Blobber(object):
"""A factory for TextBlobs that all share the same tagger,
tokenizer, parser, classifier, and np_extractor.
Usage:
>>> from textblob import Blobber
>>> from textblob.taggers import NLTKTagger
>>> from textblob.tokenizers import SentenceTokenizer
>>> tb = Blobber(pos_tagger=NLTKTagger(), tokenizer=SentenceTokenizer())
>>> blob1 = tb("This is one blob.")
>>> blob2 = tb("This blob has the same tagger and tokenizer.")
>>> blob1.pos_tagger is blob2.pos_tagger
True
:param tokenizer: (optional) A tokenizer instance. If ``None``,
defaults to :class:`WordTokenizer() <textblob.tokenizers.WordTokenizer>`.
:param np_extractor: (optional) An NPExtractor instance. If ``None``,
defaults to :class:`FastNPExtractor() <textblob.en.np_extractors.FastNPExtractor>`.
:param pos_tagger: (optional) A Tagger instance. If ``None``,
defaults to :class:`NLTKTagger <textblob.en.taggers.NLTKTagger>`.
:param analyzer: (optional) A sentiment analyzer. If ``None``,
defaults to :class:`PatternAnalyzer <textblob.en.sentiments.PatternAnalyzer>`.
:param parser: A parser. If ``None``, defaults to
:class:`PatternParser <textblob.en.parsers.PatternParser>`.
:param classifier: A classifier.
.. versionadded:: 0.4.0
"""
np_extractor = FastNPExtractor()
pos_tagger = NLTKTagger()
tokenizer = WordTokenizer()
analyzer = PatternAnalyzer()
parser = PatternParser()
def __init__(self, tokenizer=None, pos_tagger=None, np_extractor=None,
analyzer=None, parser=None, classifier=None):
_initialize_models(self, tokenizer, pos_tagger, np_extractor, analyzer,
parser, classifier)
def __call__(self, text):
"""Return a new TextBlob object with this Blobber's ``np_extractor``,
``pos_tagger``, ``tokenizer``, ``analyzer``, and ``classifier``.
:returns: A new :class:`TextBlob <TextBlob>`.
"""
return TextBlob(text, tokenizer=self.tokenizer, pos_tagger=self.pos_tagger,
np_extractor=self.np_extractor, analyzer=self.analyzer,
parser=self.parser,
classifier=self.classifier)
def __repr__(self):
classifier_name = self.classifier.__class__.__name__ + "()" if self.classifier else "None"
return ("Blobber(tokenizer={0}(), pos_tagger={1}(), "
"np_extractor={2}(), analyzer={3}(), parser={4}(), classifier={5})")\
.format(self.tokenizer.__class__.__name__,
self.pos_tagger.__class__.__name__,
self.np_extractor.__class__.__name__,
self.analyzer.__class__.__name__,
self.parser.__class__.__name__,
classifier_name)
__str__ = __repr__
| {
"content_hash": "a74b570cb6fa031469cb078aa8538dc2",
"timestamp": "",
"source": "github",
"line_count": 769,
"max_line_length": 101,
"avg_line_length": 37.04291287386216,
"alnum_prop": 0.6068244049708629,
"repo_name": "mab1290/GHOST",
"id": "f8182f207ec90a5f69cb0a6d9be5a1623d14abd1",
"size": "28510",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vendor/textblob/blob.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27163"
}
],
"symlink_target": ""
} |
"""Support for Amcrest IP cameras."""
import asyncio
from datetime import timedelta
from functools import partial
import logging
from amcrest import AmcrestError
from haffmpeg.camera import CameraMjpeg
import voluptuous as vol
from homeassistant.components.camera import (
CAMERA_SERVICE_SCHEMA,
SUPPORT_ON_OFF,
SUPPORT_STREAM,
Camera,
)
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import CONF_NAME, STATE_OFF, STATE_ON
from homeassistant.helpers.aiohttp_client import (
async_aiohttp_proxy_stream,
async_aiohttp_proxy_web,
async_get_clientsession,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
CAMERA_WEB_SESSION_TIMEOUT,
CAMERAS,
COMM_TIMEOUT,
DATA_AMCREST,
DEVICES,
SERVICE_UPDATE,
SNAPSHOT_TIMEOUT,
)
from .helpers import log_update_error, service_signal
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=15)
STREAM_SOURCE_LIST = ["snapshot", "mjpeg", "rtsp"]
_SRV_EN_REC = "enable_recording"
_SRV_DS_REC = "disable_recording"
_SRV_EN_AUD = "enable_audio"
_SRV_DS_AUD = "disable_audio"
_SRV_EN_MOT_REC = "enable_motion_recording"
_SRV_DS_MOT_REC = "disable_motion_recording"
_SRV_GOTO = "goto_preset"
_SRV_CBW = "set_color_bw"
_SRV_TOUR_ON = "start_tour"
_SRV_TOUR_OFF = "stop_tour"
_SRV_PTZ_CTRL = "ptz_control"
_ATTR_PTZ_TT = "travel_time"
_ATTR_PTZ_MOV = "movement"
_MOV = [
"zoom_out",
"zoom_in",
"right",
"left",
"up",
"down",
"right_down",
"right_up",
"left_down",
"left_up",
]
_ZOOM_ACTIONS = ["ZoomWide", "ZoomTele"]
_MOVE_1_ACTIONS = ["Right", "Left", "Up", "Down"]
_MOVE_2_ACTIONS = ["RightDown", "RightUp", "LeftDown", "LeftUp"]
_ACTION = _ZOOM_ACTIONS + _MOVE_1_ACTIONS + _MOVE_2_ACTIONS
_DEFAULT_TT = 0.2
_ATTR_PRESET = "preset"
_ATTR_COLOR_BW = "color_bw"
_CBW_COLOR = "color"
_CBW_AUTO = "auto"
_CBW_BW = "bw"
_CBW = [_CBW_COLOR, _CBW_AUTO, _CBW_BW]
_SRV_GOTO_SCHEMA = CAMERA_SERVICE_SCHEMA.extend(
{vol.Required(_ATTR_PRESET): vol.All(vol.Coerce(int), vol.Range(min=1))}
)
_SRV_CBW_SCHEMA = CAMERA_SERVICE_SCHEMA.extend(
{vol.Required(_ATTR_COLOR_BW): vol.In(_CBW)}
)
_SRV_PTZ_SCHEMA = CAMERA_SERVICE_SCHEMA.extend(
{
vol.Required(_ATTR_PTZ_MOV): vol.In(_MOV),
vol.Optional(_ATTR_PTZ_TT, default=_DEFAULT_TT): cv.small_float,
}
)
CAMERA_SERVICES = {
_SRV_EN_REC: (CAMERA_SERVICE_SCHEMA, "async_enable_recording", ()),
_SRV_DS_REC: (CAMERA_SERVICE_SCHEMA, "async_disable_recording", ()),
_SRV_EN_AUD: (CAMERA_SERVICE_SCHEMA, "async_enable_audio", ()),
_SRV_DS_AUD: (CAMERA_SERVICE_SCHEMA, "async_disable_audio", ()),
_SRV_EN_MOT_REC: (CAMERA_SERVICE_SCHEMA, "async_enable_motion_recording", ()),
_SRV_DS_MOT_REC: (CAMERA_SERVICE_SCHEMA, "async_disable_motion_recording", ()),
_SRV_GOTO: (_SRV_GOTO_SCHEMA, "async_goto_preset", (_ATTR_PRESET,)),
_SRV_CBW: (_SRV_CBW_SCHEMA, "async_set_color_bw", (_ATTR_COLOR_BW,)),
_SRV_TOUR_ON: (CAMERA_SERVICE_SCHEMA, "async_start_tour", ()),
_SRV_TOUR_OFF: (CAMERA_SERVICE_SCHEMA, "async_stop_tour", ()),
_SRV_PTZ_CTRL: (
_SRV_PTZ_SCHEMA,
"async_ptz_control",
(_ATTR_PTZ_MOV, _ATTR_PTZ_TT),
),
}
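# Each entry maps a service name to (request schema, handler method name,
# attribute names the handler expects); the handlers are wired up via
# dispatcher signals in AmcrestCam.async_added_to_hass below.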
_BOOL_TO_STATE = {True: STATE_ON, False: STATE_OFF}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up an Amcrest IP Camera."""
if discovery_info is None:
return
name = discovery_info[CONF_NAME]
device = hass.data[DATA_AMCREST][DEVICES][name]
async_add_entities([AmcrestCam(name, device, hass.data[DATA_FFMPEG])], True)
class CannotSnapshot(Exception):
"""Conditions are not valid for taking a snapshot."""
class AmcrestCommandFailed(Exception):
"""Amcrest camera command did not work."""
class AmcrestCam(Camera):
"""An implementation of an Amcrest IP camera."""
def __init__(self, name, device, ffmpeg):
"""Initialize an Amcrest camera."""
super().__init__()
self._name = name
self._api = device.api
self._ffmpeg = ffmpeg
self._ffmpeg_arguments = device.ffmpeg_arguments
self._stream_source = device.stream_source
self._resolution = device.resolution
self._token = self._auth = device.authentication
self._control_light = device.control_light
self._is_recording = False
self._motion_detection_enabled = None
self._brand = None
self._model = None
self._audio_enabled = None
self._motion_recording_enabled = None
self._color_bw = None
self._rtsp_url = None
self._snapshot_task = None
self._unsub_dispatcher = []
self._update_succeeded = False
def _check_snapshot_ok(self):
available = self.available
if not available or not self.is_on:
_LOGGER.warning(
"Attempt to take snapshot when %s camera is %s",
self.name,
"offline" if not available else "off",
)
raise CannotSnapshot
async def _async_get_image(self):
try:
# Send the request to snap a picture and return raw jpg data
# Snapshot command needs a much longer read timeout than other commands.
return await self.hass.async_add_executor_job(
partial(
self._api.snapshot,
timeout=(COMM_TIMEOUT, SNAPSHOT_TIMEOUT),
stream=False,
)
)
except AmcrestError as error:
log_update_error(_LOGGER, "get image from", self.name, "camera", error)
return None
finally:
self._snapshot_task = None
async def async_camera_image(self):
"""Return a still image response from the camera."""
_LOGGER.debug("Take snapshot from %s", self._name)
try:
# Amcrest cameras only support one snapshot command at a time.
# Hence need to wait if a previous snapshot has not yet finished.
# Also need to check that camera is online and turned on before each wait
            # and before initiating snapshot.
while self._snapshot_task:
self._check_snapshot_ok()
_LOGGER.debug("Waiting for previous snapshot from %s ...", self._name)
await self._snapshot_task
self._check_snapshot_ok()
# Run snapshot command in separate Task that can't be cancelled so
# 1) it's not possible to send another snapshot command while camera is
# still working on a previous one, and
# 2) someone will be around to catch any exceptions.
self._snapshot_task = self.hass.async_create_task(self._async_get_image())
return await asyncio.shield(self._snapshot_task)
except CannotSnapshot:
return None
async def handle_async_mjpeg_stream(self, request):
"""Return an MJPEG stream."""
# The snapshot implementation is handled by the parent class
if self._stream_source == "snapshot":
return await super().handle_async_mjpeg_stream(request)
if not self.available:
_LOGGER.warning(
"Attempt to stream %s when %s camera is offline",
self._stream_source,
self.name,
)
return None
if self._stream_source == "mjpeg":
# stream an MJPEG image stream directly from the camera
websession = async_get_clientsession(self.hass)
streaming_url = self._api.mjpeg_url(typeno=self._resolution)
stream_coro = websession.get(
streaming_url, auth=self._token, timeout=CAMERA_WEB_SESSION_TIMEOUT
)
return await async_aiohttp_proxy_web(self.hass, request, stream_coro)
# streaming via ffmpeg
streaming_url = self._rtsp_url
stream = CameraMjpeg(self._ffmpeg.binary, loop=self.hass.loop)
await stream.open_camera(streaming_url, extra_cmd=self._ffmpeg_arguments)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass,
request,
stream_reader,
self._ffmpeg.ffmpeg_stream_content_type,
)
finally:
await stream.close()
# Entity property overrides
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return True
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def device_state_attributes(self):
"""Return the Amcrest-specific camera state attributes."""
attr = {}
if self._audio_enabled is not None:
attr["audio"] = _BOOL_TO_STATE.get(self._audio_enabled)
if self._motion_recording_enabled is not None:
attr["motion_recording"] = _BOOL_TO_STATE.get(
self._motion_recording_enabled
)
if self._color_bw is not None:
attr[_ATTR_COLOR_BW] = self._color_bw
return attr
@property
def available(self):
"""Return True if entity is available."""
return self._api.available
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_ON_OFF | SUPPORT_STREAM
# Camera property overrides
@property
def is_recording(self):
"""Return true if the device is recording."""
return self._is_recording
@property
def brand(self):
"""Return the camera brand."""
return self._brand
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return self._motion_detection_enabled
@property
def model(self):
"""Return the camera model."""
return self._model
async def stream_source(self):
"""Return the source of the stream."""
return self._rtsp_url
@property
def is_on(self):
"""Return true if on."""
return self.is_streaming
# Other Entity method overrides
async def async_on_demand_update(self):
"""Update state."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Subscribe to signals and add camera to list."""
for service, params in CAMERA_SERVICES.items():
self._unsub_dispatcher.append(
async_dispatcher_connect(
self.hass,
service_signal(service, self.entity_id),
getattr(self, params[1]),
)
)
self._unsub_dispatcher.append(
async_dispatcher_connect(
self.hass,
service_signal(SERVICE_UPDATE, self._name),
self.async_on_demand_update,
)
)
self.hass.data[DATA_AMCREST][CAMERAS].append(self.entity_id)
async def async_will_remove_from_hass(self):
"""Remove camera from list and disconnect from signals."""
self.hass.data[DATA_AMCREST][CAMERAS].remove(self.entity_id)
for unsub_dispatcher in self._unsub_dispatcher:
unsub_dispatcher()
def update(self):
"""Update entity status."""
if not self.available or self._update_succeeded:
if not self.available:
self._update_succeeded = False
return
_LOGGER.debug("Updating %s camera", self.name)
try:
if self._brand is None:
resp = self._api.vendor_information.strip()
if resp.startswith("vendor="):
self._brand = resp.split("=")[-1]
else:
self._brand = "unknown"
if self._model is None:
resp = self._api.device_type.strip()
if resp.startswith("type="):
self._model = resp.split("=")[-1]
else:
self._model = "unknown"
self.is_streaming = self._get_video()
self._is_recording = self._get_recording()
self._motion_detection_enabled = self._get_motion_detection()
self._audio_enabled = self._get_audio()
self._motion_recording_enabled = self._get_motion_recording()
self._color_bw = self._get_color_mode()
self._rtsp_url = self._api.rtsp_url(typeno=self._resolution)
except AmcrestError as error:
log_update_error(_LOGGER, "get", self.name, "camera attributes", error)
self._update_succeeded = False
else:
self._update_succeeded = True
# Other Camera method overrides
def turn_off(self):
"""Turn off camera."""
self._enable_video(False)
def turn_on(self):
"""Turn on camera."""
self._enable_video(True)
def enable_motion_detection(self):
"""Enable motion detection in the camera."""
self._enable_motion_detection(True)
def disable_motion_detection(self):
"""Disable motion detection in camera."""
self._enable_motion_detection(False)
# Additional Amcrest Camera service methods
async def async_enable_recording(self):
"""Call the job and enable recording."""
await self.hass.async_add_executor_job(self._enable_recording, True)
async def async_disable_recording(self):
"""Call the job and disable recording."""
await self.hass.async_add_executor_job(self._enable_recording, False)
async def async_enable_audio(self):
"""Call the job and enable audio."""
await self.hass.async_add_executor_job(self._enable_audio, True)
async def async_disable_audio(self):
"""Call the job and disable audio."""
await self.hass.async_add_executor_job(self._enable_audio, False)
async def async_enable_motion_recording(self):
"""Call the job and enable motion recording."""
await self.hass.async_add_executor_job(self._enable_motion_recording, True)
async def async_disable_motion_recording(self):
"""Call the job and disable motion recording."""
await self.hass.async_add_executor_job(self._enable_motion_recording, False)
async def async_goto_preset(self, preset):
"""Call the job and move camera to preset position."""
await self.hass.async_add_executor_job(self._goto_preset, preset)
async def async_set_color_bw(self, color_bw):
"""Call the job and set camera color mode."""
await self.hass.async_add_executor_job(self._set_color_bw, color_bw)
async def async_start_tour(self):
"""Call the job and start camera tour."""
await self.hass.async_add_executor_job(self._start_tour, True)
async def async_stop_tour(self):
"""Call the job and stop camera tour."""
await self.hass.async_add_executor_job(self._start_tour, False)
async def async_ptz_control(self, movement, travel_time):
"""Move or zoom camera in specified direction."""
code = _ACTION[_MOV.index(movement)]
kwargs = {"code": code, "arg1": 0, "arg2": 0, "arg3": 0}
if code in _MOVE_1_ACTIONS:
kwargs["arg2"] = 1
elif code in _MOVE_2_ACTIONS:
kwargs["arg1"] = kwargs["arg2"] = 1
try:
await self.hass.async_add_executor_job(
partial(self._api.ptz_control_command, action="start", **kwargs)
)
await asyncio.sleep(travel_time)
await self.hass.async_add_executor_job(
partial(self._api.ptz_control_command, action="stop", **kwargs)
)
except AmcrestError as error:
log_update_error(
_LOGGER, "move", self.name, f"camera PTZ {movement}", error
)
# Methods to send commands to Amcrest camera and handle errors
def _change_setting(self, value, attr, description, action="set"):
func = description.replace(" ", "_")
description = f"camera {description} to {value}"
tries = 3
while True:
try:
getattr(self, f"_set_{func}")(value)
new_value = getattr(self, f"_get_{func}")()
if new_value != value:
raise AmcrestCommandFailed
except (AmcrestError, AmcrestCommandFailed) as error:
if tries == 1:
log_update_error(_LOGGER, action, self.name, description, error)
return
log_update_error(
_LOGGER, action, self.name, description, error, logging.DEBUG
)
else:
if attr:
setattr(self, attr, new_value)
self.schedule_update_ha_state()
return
tries -= 1
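    # For example, _change_setting(True, "_audio_enabled", "audio") calls
    # _set_audio(True), verifies the result via _get_audio(), and retries up
    # to three times before logging the failure.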
def _get_video(self):
return self._api.video_enabled
def _set_video(self, enable):
self._api.video_enabled = enable
def _enable_video(self, enable):
"""Enable or disable camera video stream."""
# Given the way the camera's state is determined by
# is_streaming and is_recording, we can't leave
# recording on if video stream is being turned off.
if self.is_recording and not enable:
self._enable_recording(False)
self._change_setting(enable, "is_streaming", "video")
if self._control_light:
self._change_light()
def _get_recording(self):
return self._api.record_mode == "Manual"
def _set_recording(self, enable):
rec_mode = {"Automatic": 0, "Manual": 1}
self._api.record_mode = rec_mode["Manual" if enable else "Automatic"]
def _enable_recording(self, enable):
"""Turn recording on or off."""
# Given the way the camera's state is determined by
# is_streaming and is_recording, we can't leave
# video stream off if recording is being turned on.
if not self.is_streaming and enable:
self._enable_video(True)
self._change_setting(enable, "_is_recording", "recording")
def _get_motion_detection(self):
return self._api.is_motion_detector_on()
def _set_motion_detection(self, enable):
self._api.motion_detection = str(enable).lower()
def _enable_motion_detection(self, enable):
"""Enable or disable motion detection."""
self._change_setting(enable, "_motion_detection_enabled", "motion detection")
def _get_audio(self):
return self._api.audio_enabled
def _set_audio(self, enable):
self._api.audio_enabled = enable
def _enable_audio(self, enable):
"""Enable or disable audio stream."""
self._change_setting(enable, "_audio_enabled", "audio")
if self._control_light:
self._change_light()
def _get_indicator_light(self):
return "true" in self._api.command(
"configManager.cgi?action=getConfig&name=LightGlobal"
).content.decode("utf-8")
def _set_indicator_light(self, enable):
self._api.command(
f"configManager.cgi?action=setConfig&LightGlobal[0].Enable={str(enable).lower()}"
)
def _change_light(self):
"""Enable or disable indicator light."""
self._change_setting(
self._audio_enabled or self.is_streaming, None, "indicator light"
)
def _get_motion_recording(self):
return self._api.is_record_on_motion_detection()
def _set_motion_recording(self, enable):
self._api.motion_recording = str(enable).lower()
def _enable_motion_recording(self, enable):
"""Enable or disable motion recording."""
self._change_setting(enable, "_motion_recording_enabled", "motion recording")
def _goto_preset(self, preset):
"""Move camera position and zoom to preset."""
try:
self._api.go_to_preset(action="start", preset_point_number=preset)
except AmcrestError as error:
log_update_error(
_LOGGER, "move", self.name, f"camera to preset {preset}", error
)
def _get_color_mode(self):
return _CBW[self._api.day_night_color]
def _set_color_mode(self, cbw):
self._api.day_night_color = _CBW.index(cbw)
def _set_color_bw(self, cbw):
"""Set camera color mode."""
self._change_setting(cbw, "_color_bw", "color mode")
def _start_tour(self, start):
"""Start camera tour."""
try:
self._api.tour(start=start)
except AmcrestError as error:
log_update_error(
_LOGGER, "start" if start else "stop", self.name, "camera tour", error
)
| {
"content_hash": "d173185f87af6d1d71ed827ca8ba6360",
"timestamp": "",
"source": "github",
"line_count": 603,
"max_line_length": 93,
"avg_line_length": 34.998341625207296,
"alnum_prop": 0.5974696739954511,
"repo_name": "titilambert/home-assistant",
"id": "5ac6acb2071baa1af7e0930c4e05449aa38ed342",
"size": "21104",
"binary": false,
"copies": "9",
"ref": "refs/heads/dev",
"path": "homeassistant/components/amcrest/camera.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
} |
"""Tests for the Renault integration."""
from __future__ import annotations
from types import MappingProxyType
from homeassistant.const import (
ATTR_ICON,
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
ATTR_SW_VERSION,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceRegistry
from homeassistant.helpers.entity_registry import EntityRegistry
from .const import DYNAMIC_ATTRIBUTES, FIXED_ATTRIBUTES, ICON_FOR_EMPTY_VALUES
def get_no_data_icon(expected_entity: MappingProxyType):
"""Check icon attribute for inactive sensors."""
entity_id = expected_entity["entity_id"]
return ICON_FOR_EMPTY_VALUES.get(entity_id, expected_entity.get(ATTR_ICON))
def check_device_registry(
device_registry: DeviceRegistry, expected_device: MappingProxyType
) -> None:
"""Ensure that the expected_device is correctly registered."""
assert len(device_registry.devices) == 1
registry_entry = device_registry.async_get_device(expected_device[ATTR_IDENTIFIERS])
assert registry_entry is not None
assert registry_entry.identifiers == expected_device[ATTR_IDENTIFIERS]
assert registry_entry.manufacturer == expected_device[ATTR_MANUFACTURER]
assert registry_entry.name == expected_device[ATTR_NAME]
assert registry_entry.model == expected_device[ATTR_MODEL]
assert registry_entry.sw_version == expected_device[ATTR_SW_VERSION]
def check_entities(
hass: HomeAssistant,
entity_registry: EntityRegistry,
expected_entities: MappingProxyType,
) -> None:
"""Ensure that the expected_entities are correct."""
for expected_entity in expected_entities:
entity_id = expected_entity["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_entity["unique_id"]
state = hass.states.get(entity_id)
assert state.state == expected_entity["result"]
for attr in FIXED_ATTRIBUTES + DYNAMIC_ATTRIBUTES:
assert state.attributes.get(attr) == expected_entity.get(attr)
def check_entities_no_data(
hass: HomeAssistant,
entity_registry: EntityRegistry,
expected_entities: MappingProxyType,
expected_state: str,
) -> None:
"""Ensure that the expected_entities are correct."""
for expected_entity in expected_entities:
entity_id = expected_entity["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_entity["unique_id"]
state = hass.states.get(entity_id)
assert state.state == expected_state
for attr in FIXED_ATTRIBUTES:
assert state.attributes.get(attr) == expected_entity.get(attr)
# Check dynamic attributes:
assert state.attributes.get(ATTR_ICON) == get_no_data_icon(expected_entity)
def check_entities_unavailable(
hass: HomeAssistant,
entity_registry: EntityRegistry,
expected_entities: MappingProxyType,
) -> None:
"""Ensure that the expected_entities are correct."""
for expected_entity in expected_entities:
entity_id = expected_entity["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_entity["unique_id"]
state = hass.states.get(entity_id)
assert state.state == STATE_UNAVAILABLE
for attr in FIXED_ATTRIBUTES:
assert state.attributes.get(attr) == expected_entity.get(attr)
# Check dynamic attributes:
assert state.attributes.get(ATTR_ICON) == get_no_data_icon(expected_entity)
| {
"content_hash": "78ee9d4af4b4373292205f01590f3847",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 88,
"avg_line_length": 39.98947368421052,
"alnum_prop": 0.7104501184522243,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "7b3bb9e3d0ac4cb740e8dfbd2049fc53aea76e37",
"size": "3799",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/renault/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from oslo_db.sqlalchemy import test_fixtures
import glance.tests.functional.db.migrations.test_pike_expand01 as tpe01
import glance.tests.utils as test_utils
# No TestPikeMigrate01Mixin class is needed; TestPikeExpand01Mixin can be used instead.
class TestPikeMigrate01MySQL(
tpe01.TestPikeExpand01Mixin,
test_fixtures.OpportunisticDBTestMixin,
test_utils.BaseTestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
| {
"content_hash": "9c0fe423b225db2e190ccb2b252433f7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 29.066666666666666,
"alnum_prop": 0.8165137614678899,
"repo_name": "openstack/glance",
"id": "2e5a3d6ab3d5f32c27b44396f3c1fa532cfbbe67",
"size": "1009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/functional/db/migrations/test_pike_migrate01.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1353"
},
{
"name": "Mako",
"bytes": "431"
},
{
"name": "Python",
"bytes": "4008354"
},
{
"name": "Shell",
"bytes": "3184"
}
],
"symlink_target": ""
} |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent90000 import *
class agilentDSAX93204A(agilent90000):
"Agilent Infiniium DSAX93204A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DSAX93204A')
super(agilentDSAX93204A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 32e9
self._init_channels()
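# Usage sketch (the VISA resource string below is illustrative):
#   import ivi
#   scope = ivi.agilent.agilentDSAX93204A("TCPIP0::192.168.1.100::INSTR")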
| {
"content_hash": "d23a42bbe04535d38307ddbf1ca94c22",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 86,
"avg_line_length": 38.43181818181818,
"alnum_prop": 0.7374334713187463,
"repo_name": "Diti24/python-ivi",
"id": "1b0ddfa30b4dcb0dc674f2d7aab795ae3c1e9ce9",
"size": "1691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivi/agilent/agilentDSAX93204A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1992462"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import os
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.dataproc_v1.services.cluster_controller import pagers
from google.cloud.dataproc_v1.types import clusters
from google.cloud.dataproc_v1.types import operations
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import ClusterControllerTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ClusterControllerGrpcTransport
from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport
class ClusterControllerClientMeta(type):
"""Metaclass for the ClusterController client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ClusterControllerTransport]]
_transport_registry["grpc"] = ClusterControllerGrpcTransport
_transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport
def get_transport_class(
cls,
label: str = None,
) -> Type[ClusterControllerTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
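    # Illustrative behavior of the registry lookup above (a sketch, not
    # authoritative documentation):
    #   get_transport_class()               -> ClusterControllerGrpcTransport
    #   get_transport_class("grpc_asyncio") -> ClusterControllerGrpcAsyncIOTransport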
class ClusterControllerClient(metaclass=ClusterControllerClientMeta):
"""The ClusterControllerService provides methods to manage
clusters of Compute Engine instances.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
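    # Illustrative conversions performed by the helper above (a sketch of
    # the regex logic, not official documentation):
    #   "dataproc.googleapis.com"         -> "dataproc.mtls.googleapis.com"
    #   "dataproc.sandbox.googleapis.com" -> "dataproc.mtls.sandbox.googleapis.com"
    #   "dataproc.mtls.googleapis.com"    -> returned unchanged (already mTLS)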
DEFAULT_ENDPOINT = "dataproc.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ClusterControllerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ClusterControllerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
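    # Hedged usage sketch (the key file path is hypothetical):
    #   client = ClusterControllerClient.from_service_account_file(
    #       "path/to/service-account-key.json")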
@property
def transport(self) -> ClusterControllerTransport:
"""Returns the transport used by the client instance.
Returns:
ClusterControllerTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def service_path(
project: str,
location: str,
service: str,
) -> str:
"""Returns a fully-qualified service string."""
return "projects/{project}/locations/{location}/services/{service}".format(
project=project,
location=location,
service=service,
)
@staticmethod
def parse_service_path(path: str) -> Dict[str, str]:
"""Parses a service path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/services/(?P<service>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
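    # Illustrative round trip through the path helpers above (the project
    # and location values are made up):
    #   common_location_path("my-proj", "us-central1")
    #       -> "projects/my-proj/locations/us-central1"
    #   parse_common_location_path("projects/my-proj/locations/us-central1")
    #       -> {"project": "my-proj", "location": "us-central1"}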
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
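    # Sketch of the resolution above under two assumed environments
    # (for orientation only, not exhaustive):
    #   GOOGLE_API_USE_CLIENT_CERTIFICATE=false, GOOGLE_API_USE_MTLS_ENDPOINT=auto
    #       -> (DEFAULT_ENDPOINT, None)
    #   GOOGLE_API_USE_MTLS_ENDPOINT=always
    #       -> (DEFAULT_MTLS_ENDPOINT, client cert source if configured, else None)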
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ClusterControllerTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the cluster controller client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ClusterControllerTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ClusterControllerTransport):
# transport is a ClusterControllerTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
api_audience=client_options.api_audience,
)
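        # Hedged construction sketch (the regional endpoint shown follows the
        # public "<region>-dataproc.googleapis.com" convention but is
        # illustrative):
        #   client = ClusterControllerClient(
        #       client_options=client_options_lib.ClientOptions(
        #           api_endpoint="us-central1-dataproc.googleapis.com:443"))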
def create_cluster(
self,
request: Union[clusters.CreateClusterRequest, dict] = None,
*,
project_id: str = None,
region: str = None,
cluster: clusters.Cluster = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a cluster in a project. The returned
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataproc_v1
def sample_create_cluster():
# Create a client
client = dataproc_v1.ClusterControllerClient()
# Initialize request argument(s)
cluster = dataproc_v1.Cluster()
cluster.project_id = "project_id_value"
cluster.cluster_name = "cluster_name_value"
request = dataproc_v1.CreateClusterRequest(
project_id="project_id_value",
region="region_value",
cluster=cluster,
)
# Make the request
operation = client.create_cluster(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataproc_v1.types.CreateClusterRequest, dict]):
The request object. A request to create a cluster.
project_id (str):
Required. The ID of the Google Cloud
Platform project that the cluster
belongs to.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
Required. The Dataproc region in
which to handle the request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster (google.cloud.dataproc_v1.types.Cluster):
Required. The cluster to create.
This corresponds to the ``cluster`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
a Dataproc cluster
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, region, cluster])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a clusters.CreateClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, clusters.CreateClusterRequest):
request = clusters.CreateClusterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if region is not None:
request.region = region
if cluster is not None:
request.cluster = cluster
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project_id", request.project_id),
("region", request.region),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
clusters.Cluster,
metadata_type=operations.ClusterOperationMetadata,
)
# Done; return the response.
return response
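    # Hedged sketch of the flattened-argument form of create_cluster (all
    # values illustrative; mutually exclusive with passing `request`):
    #   op = client.create_cluster(
    #       project_id="my-project",
    #       region="us-central1",
    #       cluster=clusters.Cluster(cluster_name="my-cluster"),
    #   )
    #   created = op.result()  # blocks until the long-running operation finishes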
def update_cluster(
self,
request: Union[clusters.UpdateClusterRequest, dict] = None,
*,
project_id: str = None,
region: str = None,
cluster_name: str = None,
cluster: clusters.Cluster = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates a cluster in a project. The returned
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
The cluster must be in a
[``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
state or an error is returned.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataproc_v1
def sample_update_cluster():
# Create a client
client = dataproc_v1.ClusterControllerClient()
# Initialize request argument(s)
cluster = dataproc_v1.Cluster()
cluster.project_id = "project_id_value"
cluster.cluster_name = "cluster_name_value"
request = dataproc_v1.UpdateClusterRequest(
project_id="project_id_value",
region="region_value",
cluster_name="cluster_name_value",
cluster=cluster,
)
# Make the request
operation = client.update_cluster(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataproc_v1.types.UpdateClusterRequest, dict]):
The request object. A request to update a cluster.
project_id (str):
Required. The ID of the Google Cloud
Platform project the cluster belongs to.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
Required. The Dataproc region in
which to handle the request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_name (str):
Required. The cluster name.
This corresponds to the ``cluster_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster (google.cloud.dataproc_v1.types.Cluster):
Required. The changes to the cluster.
This corresponds to the ``cluster`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Specifies the path, relative to ``Cluster``,
of the field to update. For example, to change the
number of workers in a cluster to 5, the ``update_mask``
parameter would be specified as
``config.worker_config.num_instances``, and the
``PATCH`` request body would specify the new value, as
follows:
::
{
"config":{
"workerConfig":{
"numInstances":"5"
}
}
}
Similarly, to change the number of preemptible workers
in a cluster to 5, the ``update_mask`` parameter would
be ``config.secondary_worker_config.num_instances``, and
the ``PATCH`` request body would be set as follows:
::
{
"config":{
"secondaryWorkerConfig":{
"numInstances":"5"
}
}
}
Note: Currently, only the following fields can be
updated:
.. raw:: html
<table>
<tbody>
<tr>
<td><strong>Mask</strong></td>
<td><strong>Purpose</strong></td>
</tr>
<tr>
<td><strong><em>labels</em></strong></td>
<td>Update labels</td>
</tr>
<tr>
<td><strong><em>config.worker_config.num_instances</em></strong></td>
<td>Resize primary worker group</td>
</tr>
<tr>
<td><strong><em>config.secondary_worker_config.num_instances</em></strong></td>
<td>Resize secondary worker group</td>
</tr>
<tr>
<td>config.autoscaling_config.policy_uri</td><td>Use, stop using, or
change autoscaling policies</td>
</tr>
</tbody>
</table>
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
a Dataproc cluster
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[project_id, region, cluster_name, cluster, update_mask]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a clusters.UpdateClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, clusters.UpdateClusterRequest):
request = clusters.UpdateClusterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if region is not None:
request.region = region
if cluster_name is not None:
request.cluster_name = cluster_name
if cluster is not None:
request.cluster = cluster
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project_id", request.project_id),
("region", request.region),
("cluster_name", request.cluster_name),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
clusters.Cluster,
metadata_type=operations.ClusterOperationMetadata,
)
# Done; return the response.
return response
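    # Hedged sketch: resizing the primary worker group with a field mask
    # (all values illustrative):
    #   mask = field_mask_pb2.FieldMask(
    #       paths=["config.worker_config.num_instances"])
    #   op = client.update_cluster(
    #       project_id="my-project", region="us-central1",
    #       cluster_name="my-cluster",
    #       cluster=clusters.Cluster(config={"worker_config": {"num_instances": 5}}),
    #       update_mask=mask,
    #   )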
def stop_cluster(
self,
request: Union[clusters.StopClusterRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Stops a cluster in a project.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataproc_v1
def sample_stop_cluster():
# Create a client
client = dataproc_v1.ClusterControllerClient()
# Initialize request argument(s)
request = dataproc_v1.StopClusterRequest(
project_id="project_id_value",
region="region_value",
cluster_name="cluster_name_value",
)
# Make the request
operation = client.stop_cluster(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataproc_v1.types.StopClusterRequest, dict]):
The request object. A request to stop a cluster.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
a Dataproc cluster
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a clusters.StopClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, clusters.StopClusterRequest):
request = clusters.StopClusterRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.stop_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project_id", request.project_id),
("region", request.region),
("cluster_name", request.cluster_name),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
clusters.Cluster,
metadata_type=operations.ClusterOperationMetadata,
)
# Done; return the response.
return response
def start_cluster(
self,
request: Union[clusters.StartClusterRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Starts a cluster in a project.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataproc_v1
def sample_start_cluster():
# Create a client
client = dataproc_v1.ClusterControllerClient()
# Initialize request argument(s)
request = dataproc_v1.StartClusterRequest(
project_id="project_id_value",
region="region_value",
cluster_name="cluster_name_value",
)
# Make the request
operation = client.start_cluster(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataproc_v1.types.StartClusterRequest, dict]):
The request object. A request to start a cluster.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
a Dataproc cluster
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a clusters.StartClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, clusters.StartClusterRequest):
request = clusters.StartClusterRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.start_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project_id", request.project_id),
("region", request.region),
("cluster_name", request.cluster_name),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
clusters.Cluster,
metadata_type=operations.ClusterOperationMetadata,
)
# Done; return the response.
return response
def delete_cluster(
self,
request: Union[clusters.DeleteClusterRequest, dict] = None,
*,
project_id: str = None,
region: str = None,
cluster_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a cluster in a project. The returned
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataproc_v1
def sample_delete_cluster():
# Create a client
client = dataproc_v1.ClusterControllerClient()
# Initialize request argument(s)
request = dataproc_v1.DeleteClusterRequest(
project_id="project_id_value",
region="region_value",
cluster_name="cluster_name_value",
)
# Make the request
operation = client.delete_cluster(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataproc_v1.types.DeleteClusterRequest, dict]):
The request object. A request to delete a cluster.
project_id (str):
Required. The ID of the Google Cloud
Platform project that the cluster
belongs to.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
Required. The Dataproc region in
which to handle the request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_name (str):
Required. The cluster name.
This corresponds to the ``cluster_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, region, cluster_name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a clusters.DeleteClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, clusters.DeleteClusterRequest):
request = clusters.DeleteClusterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if region is not None:
request.region = region
if cluster_name is not None:
request.cluster_name = cluster_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project_id", request.project_id),
("region", request.region),
("cluster_name", request.cluster_name),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=operations.ClusterOperationMetadata,
)
# Done; return the response.
return response
def get_cluster(
self,
request: Union[clusters.GetClusterRequest, dict] = None,
*,
project_id: str = None,
region: str = None,
cluster_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> clusters.Cluster:
r"""Gets the resource representation for a cluster in a
project.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataproc_v1
def sample_get_cluster():
# Create a client
client = dataproc_v1.ClusterControllerClient()
# Initialize request argument(s)
request = dataproc_v1.GetClusterRequest(
project_id="project_id_value",
region="region_value",
cluster_name="cluster_name_value",
)
# Make the request
response = client.get_cluster(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataproc_v1.types.GetClusterRequest, dict]):
The request object. Request to get the resource
representation for a cluster in a project.
project_id (str):
Required. The ID of the Google Cloud
Platform project that the cluster
belongs to.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
Required. The Dataproc region in
which to handle the request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_name (str):
Required. The cluster name.
This corresponds to the ``cluster_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataproc_v1.types.Cluster:
Describes the identifying
information, config, and status of a
Dataproc cluster
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, region, cluster_name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a clusters.GetClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, clusters.GetClusterRequest):
request = clusters.GetClusterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if region is not None:
request.region = region
if cluster_name is not None:
request.cluster_name = cluster_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project_id", request.project_id),
("region", request.region),
("cluster_name", request.cluster_name),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_clusters(
self,
request: Union[clusters.ListClustersRequest, dict] = None,
*,
project_id: str = None,
region: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListClustersPager:
r"""Lists all regions/{region}/clusters in a project
alphabetically.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataproc_v1
def sample_list_clusters():
# Create a client
client = dataproc_v1.ClusterControllerClient()
# Initialize request argument(s)
request = dataproc_v1.ListClustersRequest(
project_id="project_id_value",
region="region_value",
)
# Make the request
page_result = client.list_clusters(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.dataproc_v1.types.ListClustersRequest, dict]):
The request object. A request to list the clusters in a
project.
project_id (str):
Required. The ID of the Google Cloud
Platform project that the cluster
belongs to.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
Required. The Dataproc region in
which to handle the request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. A filter constraining the clusters to list.
Filters are case-sensitive and have the following
syntax:
field = value [AND [field = value]] ...
where **field** is one of ``status.state``,
``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a
label key. **value** can be ``*`` to match all values.
``status.state`` can be one of the following:
``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``,
``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE``
contains the ``CREATING``, ``UPDATING``, and ``RUNNING``
states. ``INACTIVE`` contains the ``DELETING`` and
``ERROR`` states. ``clusterName`` is the name of the
cluster provided at creation time. Only the logical
``AND`` operator is supported; space-separated items are
treated as having an implicit ``AND`` operator.
Example filter:
status.state = ACTIVE AND clusterName = mycluster AND
labels.env = staging AND labels.starred = \*
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dataproc_v1.services.cluster_controller.pagers.ListClustersPager:
The list of all clusters in a
project.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, region, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a clusters.ListClustersRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, clusters.ListClustersRequest):
request = clusters.ListClustersRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if region is not None:
request.region = region
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_clusters]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project_id", request.project_id),
("region", request.region),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListClustersPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
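    # Hedged sketch: the pager yields clusters across pages transparently
    # (filter syntax per the docstring above; values illustrative):
    #   for cluster in client.list_clusters(
    #           project_id="my-project", region="us-central1",
    #           filter="status.state = ACTIVE"):
    #       print(cluster.cluster_name)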
def diagnose_cluster(
self,
request: Union[clusters.DiagnoseClusterRequest, dict] = None,
*,
project_id: str = None,
region: str = None,
cluster_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Gets cluster diagnostic information. The returned
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
After the operation completes,
[Operation.response][google.longrunning.Operation.response]
contains
`DiagnoseClusterResults <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults>`__.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataproc_v1
def sample_diagnose_cluster():
# Create a client
client = dataproc_v1.ClusterControllerClient()
# Initialize request argument(s)
request = dataproc_v1.DiagnoseClusterRequest(
project_id="project_id_value",
region="region_value",
cluster_name="cluster_name_value",
)
# Make the request
operation = client.diagnose_cluster(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dataproc_v1.types.DiagnoseClusterRequest, dict]):
The request object. A request to collect cluster
diagnostic information.
project_id (str):
Required. The ID of the Google Cloud
Platform project that the cluster
belongs to.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
Required. The Dataproc region in
which to handle the request.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_name (str):
Required. The cluster name.
This corresponds to the ``cluster_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dataproc_v1.types.DiagnoseClusterResults`
The location of diagnostic output.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, region, cluster_name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a clusters.DiagnoseClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, clusters.DiagnoseClusterRequest):
request = clusters.DiagnoseClusterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if region is not None:
request.region = region
if cluster_name is not None:
request.cluster_name = cluster_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.diagnose_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project_id", request.project_id),
("region", request.region),
("cluster_name", request.cluster_name),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
clusters.DiagnoseClusterResults,
metadata_type=operations.ClusterOperationMetadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
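    # Hedged usage sketch of the context-manager form (per the warning above,
    # avoid it when the transport is shared with other clients):
    #   with ClusterControllerClient() as client:
    #       client.get_cluster(project_id="my-project",
    #                          region="us-central1",
    #                          cluster_name="my-cluster")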
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dataproc",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ClusterControllerClient",)
| {
"content_hash": "641a8faad80512e12690b181b1344aaa",
"timestamp": "",
"source": "github",
"line_count": 1629,
"max_line_length": 171,
"avg_line_length": 40.41252302025783,
"alnum_prop": 0.5781534815895005,
"repo_name": "googleapis/python-dataproc",
"id": "aa4eb2ae46c2b005c9f990f37ef13ee58424b486",
"size": "66432",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/dataproc_v1/services/cluster_controller/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1606885"
},
{
"name": "Shell",
"bytes": "30666"
}
],
"symlink_target": ""
} |
'''
Created on 18/11/2018
@author: david
'''
import logging
import time
from threading import Thread
from sensor.imu6050 import Imu6050
class AngleProvider(object):
def __init__(self, period):
self._imu = Imu6050()
self._period = period
self._angle = 0.0
self._isRunning = False
self._imuReadThread = None
def start(self):
self._imu.start()
        if self._imuReadThread is None or not self._imuReadThread.is_alive():
self._isRunning = True
self._imuReadThread = Thread(target=self._doImuRead)
self._imuReadThread.start()
def stop(self):
        if self._imuReadThread is not None and self._imuReadThread.is_alive():
self._isRunning = False
self._imuReadThread.join()
self._imu.stop()
def readAngleZ(self):
return self._angle
def _doImuRead(self):
self._imu.updateGyroTime()
while self._isRunning:
self._angle = self._imu.readAngleZ()
time.sleep(self._period)
logging.basicConfig(level=logging.INFO)
def emulateStabilization(imu, seconds):
logging.debug("{0:.3f}°".format(imu.readAngleZ()))
initTime = time.time()
    elapsedTime = 0.0
    while elapsedTime < seconds:
        time.sleep(0.02)
        imu.readAngleZ()
        logging.debug("{0:.3f}°".format(imu.readAngleZ()))
        elapsedTime = time.time() - initTime
def keepAngle(imu, angle):
logging.info("Keep at {0:.3f}°".format(angle))
emulateStabilization(imu, 10.0)
logging.info("Angle after keeping: {0:.3f}".format(imu.readAngleZ()))
def turnToAngle(imu, angle):
logging.info("Turn to {0:.3f}°".format(angle))
emulateStabilization(imu, 3.0)
logging.info("Angle after turning: {0:.3f}".format(imu.readAngleZ()))
def testAngle(imu, angle):
turnToAngle(imu, angle)
keepAngle(imu, angle)
TRAVEL_THROTTLE = 30.0
logging.info("Initializing IMU")
imu = AngleProvider(0.02)
imu.start()
try:
logging.info("Starting")
keepAngle(imu, 0.0)
testAngle(imu, 45.0)
testAngle(imu, 90.0)
testAngle(imu, 45.0)
testAngle(imu, 0.0)
testAngle(imu, 45.0)
testAngle(imu, 90.0)
testAngle(imu, 45.0)
turnToAngle(imu, 0.0)
finally:
imu.stop()
logging.info("End")
| {
"content_hash": "c57749357f0028ea40c11608034804b1",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 76,
"avg_line_length": 21.094827586206897,
"alnum_prop": 0.5860237024928484,
"repo_name": "dpm76/Bot1",
"id": "8af6296de874c74aaea40e3e155d8b11f57bb056",
"size": "2451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot1/playground/imu6050_drift_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "244329"
},
{
"name": "Shell",
"bytes": "3779"
}
],
"symlink_target": ""
} |
"""Tests that do require database setup"""
| {
"content_hash": "048a3645020329517bdd79dee64df38a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 42,
"avg_line_length": 43,
"alnum_prop": 0.7209302325581395,
"repo_name": "steder/goose",
"id": "c6b61235f2cfe3ad77e50492988fe1456a60f028",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goose/ftest/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40258"
}
],
"symlink_target": ""
} |
from QUANTAXIS.QAUtil import QA_util_log_info
from QUANTAXIS.QAUtil import QA_util_date_valid,QA_util_date_stamp
from pandas import DataFrame
from bson.objectid import ObjectId
import numpy
import datetime
"""
按要求从数据库取数据,并转换成dataframe结构
"""
def QA_fetch_data(code,startDate,endDate,collections):
#print(datetime.datetime.now())
    startDate = str(startDate)[0:10]
    endDate = str(endDate)[0:10]
    coll = collections
    if QA_util_date_valid(endDate):
        list_a = [[], [], [], [], [], [], []]
for item in coll.find({'code':str(code)[0:6],"date_stamp":{"$lte":QA_util_date_stamp(endDate),"$gte":QA_util_date_stamp(startDate)}}):
#print(item['code'])
list_a[0].append(item['code'])
list_a[1].append(item['open'])
list_a[2].append(item['high'])
list_a[3].append(item['low'])
list_a[4].append(item['close'])
list_a[5].append(item['volume'])
list_a[6].append(item['date'])
#print(datetime.datetime.now())
        data = numpy.asarray(list_a).transpose()
#data=DataFrame(list_a).transpose()
#print(datetime.datetime.now())
return data
else:
QA_util_log_info('something wrong with date')
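# Hedged usage sketch (the pymongo collection handle and stock code are
# illustrative):
#   coll = pymongo.MongoClient().quantaxis.stock_day
#   data = QA_fetch_data('000001', '2017-01-01', '2017-06-30', coll)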
| {
"content_hash": "e840427fb056f36f41b84374bfb2b034",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 142,
"avg_line_length": 33.567567567567565,
"alnum_prop": 0.6111111111111112,
"repo_name": "lijiabogithub/QUANTAXIS",
"id": "9672a47407de8d466d373a6311d2bbda8119fb0c",
"size": "1294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QUANTAXIS/QAFetch/QAQuery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32445"
},
{
"name": "HTML",
"bytes": "1166"
},
{
"name": "JavaScript",
"bytes": "92065"
},
{
"name": "Python",
"bytes": "231114"
},
{
"name": "Vue",
"bytes": "170066"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from users import views
urlpatterns = patterns('',
# URL pattern for the UserListView # noqa
url(
regex=r'^$',
view=views.UserListView.as_view(),
name='list'
),
# URL pattern for the UserRedirectView
url(
regex=r'^~redirect/$',
view=views.UserRedirectView.as_view(),
name='redirect'
),
# URL pattern for the UserDetailView
url(
regex=r'^(?P<username>[\w.@+-]+)/$',
view=views.UserDetailView.as_view(),
name='detail'
),
# URL pattern for the UserUpdateView
url(
regex=r'^~update/$',
view=views.UserUpdateView.as_view(),
name='update'
),
url(
regex=r'^~review/(?P<slug>[\w.@+-]+)$',
view=views.UserToUserDetailView.as_view(),
name='review'
),
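    # Hedged sketch: with these names, URLs reverse as, e.g. (the 'users'
    # namespace is assumed from the app name):
    #   reverse('users:detail', kwargs={'username': 'alice'})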
)
| {
"content_hash": "f34db0cca83722e897c309b4724c8dcd",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 50,
"avg_line_length": 24.685714285714287,
"alnum_prop": 0.5555555555555556,
"repo_name": "c-rhodes/hack2014",
"id": "0283cfebecc23157daaca0aa1102024b940964d3",
"size": "888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hack2014/users/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "104715"
},
{
"name": "JavaScript",
"bytes": "2341"
},
{
"name": "Python",
"bytes": "46831"
},
{
"name": "Shell",
"bytes": "5099"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Confirmation'
db.create_table('viveum_confirmation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('order', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['synthesa.Order'])),
('status', self.gf('django.db.models.fields.IntegerField')()),
('acceptance', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('payid', self.gf('django.db.models.fields.IntegerField')()),
('merchant_comment', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('ncerror', self.gf('django.db.models.fields.IntegerField')()),
('cn', self.gf('django.db.models.fields.CharField')(max_length=255)),
('amount', self.gf('django.db.models.fields.DecimalField')(default='0.0', max_digits=30, decimal_places=2)),
('ipcty', self.gf('django.db.models.fields.CharField')(max_length=2, blank=True)),
('currency', self.gf('django.db.models.fields.CharField')(max_length=3)),
('cardno', self.gf('django.db.models.fields.CharField')(max_length=21)),
('brand', self.gf('django.db.models.fields.CharField')(max_length=25)),
('origin', self.gf('django.db.models.fields.CharField')(max_length=10)),
))
db.send_create_signal('viveum', ['Confirmation'])
def backwards(self, orm):
# Deleting model 'Confirmation'
db.delete_table('viveum_confirmation')
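    # Hedged usage note (editor's addition): with South installed, this
    # migration is applied and reversed through manage.py, e.g.
    #   python manage.py migrate viveum
    #   python manage.py migrate viveum zero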
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'synthesa.order': {
'Meta': {'object_name': 'Order'},
'billing_address_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cart_pk': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'order_subtotal': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '30', 'decimal_places': '2'}),
'order_total': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '30', 'decimal_places': '2'}),
'shipping_address_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'viveum.confirmation': {
'Meta': {'object_name': 'Confirmation'},
'acceptance': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '30', 'decimal_places': '2'}),
'brand': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'cardno': ('django.db.models.fields.CharField', [], {'max_length': '21'}),
'cn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipcty': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'merchant_comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ncerror': ('django.db.models.fields.IntegerField', [], {}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['synthesa.Order']"}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'payid': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['viveum'] | {
"content_hash": "ca62bae1d0e0beda0955acf5d0708f39",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 182,
"avg_line_length": 71.79807692307692,
"alnum_prop": 0.5607338958082229,
"repo_name": "philippeowagner/django-shop-viveum",
"id": "b9286b9de9fd4af4d974c7af7b512d876115ec30",
"size": "7491",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "viveum/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3839"
},
{
"name": "Python",
"bytes": "35353"
},
{
"name": "Shell",
"bytes": "1209"
}
],
"symlink_target": ""
} |
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.linkextractors import LinkExtractor
from censible_links.items import Page
class CensibleCrawlSpider(CrawlSpider):
name = 'censible_crawl'
allowed_domains = ['censible.co']
start_urls = ['https://censible.co/']
rules = (
        Rule(LinkExtractor(allow=r'.*'), callback='parse_item', follow=True),
)
def parse_item(self, response):
self.logger.info('Response from %s ', response.url)
# Scraping item (page) content
item = Page()
selector = Selector(response)
item['title'] = selector.xpath('//title/text()').extract()
item['h1'] = selector.xpath('//h1/text()').extract()
item['href'] = selector.xpath('//a/@href').extract()
        item['meta'] = selector.xpath("//meta[@name='description']/@content").extract()
return item
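# Hedged usage sketch (editor's addition): the spider is normally started with
# `scrapy crawl censible_crawl`; a minimal programmatic run, assuming Scrapy is
# installed, looks like this:
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess()
    process.crawl(CensibleCrawlSpider)
    process.start()  # blocks until the crawl finishes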
| {
"content_hash": "afcdf1ac3c9bf5c27cd3ab76438b4b0c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 89,
"avg_line_length": 33.888888888888886,
"alnum_prop": 0.6371584699453552,
"repo_name": "pmart123/censible_links",
"id": "1add8f8cd5ee3a14ea0feb8cc872d08dadacdfc8",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "censible_links/spiders.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2028"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dream.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
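# Hedged usage note (editor's addition): standard Django entry point, invoked
# as e.g.
#   python src/manage.py migrate
#   python src/manage.py runserver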
| {
"content_hash": "f3f2515c4ef0013358f821fb633ffa26",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 69,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.7079646017699115,
"repo_name": "Yuecai/com-yuecai-dream",
"id": "8d136d71665ef47b34c195bad5e186f0f206e51f",
"size": "248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9348"
},
{
"name": "JavaScript",
"bytes": "89384"
},
{
"name": "Python",
"bytes": "125844"
}
],
"symlink_target": ""
} |
"""
Version agnostic psutil hack to fully support both old (<2.0) and new (>=2.0)
psutil versions.
The old <1.0 psutil API is dropped in psutil 3.0
Should be removed once support for psutil <2.0 is dropped. (eg RHEL 6)
Built off of http://grodola.blogspot.com/2014/01/psutil-20-porting.html
"""
# No exception handling, as we want ImportError if psutil doesn't exist
import psutil # pylint: disable=3rd-party-module-not-gated
if psutil.version_info >= (2, 0):
from psutil import * # pylint: disable=wildcard-import,unused-wildcard-import,3rd-party-module-not-gated
else:
# Import hack to work around bugs in old psutil's
    # Pseudo "from psutil import *"
_globals = globals()
for attr in psutil.__all__:
_temp = __import__("psutil", globals(), locals(), [attr], 0)
try:
_globals[attr] = getattr(_temp, attr)
except AttributeError:
pass
# Import functions not in __all__
# pylint: disable=unused-import,3rd-party-module-not-gated
from psutil import disk_partitions, disk_usage
# pylint: enable=unused-import,3rd-party-module-not-gated
# Alias new module functions
def boot_time():
return psutil.BOOT_TIME
def cpu_count():
return psutil.NUM_CPUS
# Alias renamed module functions
pids = psutil.get_pid_list
try:
users = psutil.get_users
except AttributeError:
users = lambda: (_ for _ in ()).throw(
NotImplementedError("Your psutil version is too old")
)
# Deprecated in 1.0.1, but not mentioned in blog post
if psutil.version_info < (1, 0, 1):
        net_io_counters = psutil.network_io_counters  # alias the function itself, do not call it
class Process(psutil.Process): # pylint: disable=no-init
# Reimplement overloaded getters/setters
# pylint: disable=arguments-differ
def cpu_affinity(self, *args, **kwargs):
if args or kwargs:
return self.set_cpu_affinity(*args, **kwargs)
else:
return self.get_cpu_affinity()
def ionice(self, *args, **kwargs):
if args or kwargs:
return self.set_ionice(*args, **kwargs)
else:
return self.get_ionice()
def nice(self, *args, **kwargs):
if args or kwargs:
return self.set_nice(*args, **kwargs)
else:
return self.get_nice()
def rlimit(self, *args, **kwargs):
"""
set_rlimit and get_limit were not introduced until psutil v1.1.0
"""
if psutil.version_info >= (1, 1, 0):
if args or kwargs:
return self.set_rlimit(*args, **kwargs)
else:
return self.get_rlimit()
else:
pass
# pylint: enable=arguments-differ
# Alias renamed Process functions
_PROCESS_FUNCTION_MAP = {
"children": "get_children",
"connections": "get_connections",
"cpu_percent": "get_cpu_percent",
"cpu_times": "get_cpu_times",
"io_counters": "get_io_counters",
"memory_info": "get_memory_info",
"memory_info_ex": "get_ext_memory_info",
"memory_maps": "get_memory_maps",
"memory_percent": "get_memory_percent",
"num_ctx_switches": "get_num_ctx_switches",
"num_fds": "get_num_fds",
"num_threads": "get_num_threads",
"open_files": "get_open_files",
"threads": "get_threads",
"cwd": "getcwd",
}
for new, old in _PROCESS_FUNCTION_MAP.items():
try:
setattr(Process, new, psutil.Process.__dict__[old])
except KeyError:
pass
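# Hedged usage sketch (editor's addition): consumers import this shim instead
# of psutil directly and write against the >=2.0 API only, e.g.
#   from salt.utils.psutil_compat import Process, pids
#   total = sum(Process(pid).cpu_percent(interval=None) for pid in pids())
# (cpu_percent and pids are both mapped above for old psutil versions.)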
| {
"content_hash": "5c4a39a3243869b1a901bb977434dfe7",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 109,
"avg_line_length": 33.17857142857143,
"alnum_prop": 0.5777717976318623,
"repo_name": "saltstack/salt",
"id": "e6684e5ca46fb262dc082562f6da73939ee08f02",
"size": "3716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/utils/psutil_compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
"""The schema module provides the building blocks for database metadata.
Each element within this module describes a database entity which can be
created and dropped, or is otherwise part of such an entity. Examples include
tables, columns, sequences, and indexes.
All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as
defined in this module they are intended to be agnostic of any vendor-specific
constructs.
A collection of entities are grouped into a unit called
:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of
schema elements, and can also be associated with an actual database connection
such that operations involving the contained elements can contact the database
as needed.
Two of the elements here also build upon their "syntactic" counterparts, which
are defined in :mod:`~sqlalchemy.sql.expression`, specifically
:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`.
Since these objects are part of the SQL expression language, they are usable
as components in SQL expressions.
"""
from __future__ import annotations
from abc import ABC
import collections
from enum import Enum
import operator
import typing
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import NoReturn
from typing import Optional
from typing import overload
from typing import Sequence as _typing_Sequence
from typing import Set
from typing import Tuple
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from . import coercions
from . import ddl
from . import roles
from . import type_api
from . import visitors
from .base import DedupeColumnCollection
from .base import DialectKWArgs
from .base import Executable
from .base import SchemaEventTarget as SchemaEventTarget
from .coercions import _document_text_coercion
from .elements import ClauseElement
from .elements import ColumnClause
from .elements import ColumnElement
from .elements import quoted_name
from .elements import TextClause
from .selectable import TableClause
from .type_api import to_instance
from .visitors import ExternallyTraversible
from .visitors import InternalTraversal
from .. import event
from .. import exc
from .. import inspection
from .. import util
from ..util.typing import Final
from ..util.typing import Literal
from ..util.typing import Protocol
from ..util.typing import Self
from ..util.typing import TypeGuard
if typing.TYPE_CHECKING:
from ._typing import _DDLColumnArgument
from ._typing import _InfoType
from ._typing import _TextCoercedExpressionArgument
from ._typing import _TypeEngineArgument
from .base import ReadOnlyColumnCollection
from .compiler import DDLCompiler
from .elements import BindParameter
from .functions import Function
from .type_api import TypeEngine
from .visitors import _TraverseInternalsType
from .visitors import anon_map
from ..engine import Connection
from ..engine import Engine
from ..engine.interfaces import _CoreMultiExecuteParams
from ..engine.interfaces import CoreExecuteOptionsParameter
from ..engine.interfaces import ExecutionContext
from ..engine.mock import MockConnection
from ..engine.reflection import _ReflectionInfo
from ..sql.selectable import FromClause
_T = TypeVar("_T", bound="Any")
_SI = TypeVar("_SI", bound="SchemaItem")
_ServerDefaultType = Union["FetchedValue", str, TextClause, ColumnElement[Any]]
_TAB = TypeVar("_TAB", bound="Table")
_CreateDropBind = Union["Engine", "Connection", "MockConnection"]
class SchemaConst(Enum):
RETAIN_SCHEMA = 1
"""Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence`
or in some cases a :class:`_schema.ForeignKey` object, in situations
where the object is being copied for a :meth:`.Table.to_metadata`
operation, should retain the schema name that it already has.
"""
BLANK_SCHEMA = 2
"""Symbol indicating that a :class:`_schema.Table` or :class:`.Sequence`
should have 'None' for its schema, even if the parent
:class:`_schema.MetaData` has specified a schema.
.. seealso::
:paramref:`_schema.MetaData.schema`
:paramref:`_schema.Table.schema`
:paramref:`.Sequence.schema`
.. versionadded:: 1.0.14
"""
NULL_UNSPECIFIED = 3
"""Symbol indicating the "nullable" keyword was not passed to a Column.
This is used to distinguish between the use case of passing
``nullable=None`` to a :class:`.Column`, which has special meaning
on some backends such as SQL Server.
"""
RETAIN_SCHEMA: Final[
Literal[SchemaConst.RETAIN_SCHEMA]
] = SchemaConst.RETAIN_SCHEMA
BLANK_SCHEMA: Final[
Literal[SchemaConst.BLANK_SCHEMA]
] = SchemaConst.BLANK_SCHEMA
NULL_UNSPECIFIED: Final[
Literal[SchemaConst.NULL_UNSPECIFIED]
] = SchemaConst.NULL_UNSPECIFIED
def _get_table_key(name: str, schema: Optional[str]) -> str:
if schema is None:
return name
else:
return schema + "." + name
# this should really be in sql/util.py but we'd have to
# break an import cycle
def _copy_expression(
expression: ColumnElement[Any],
source_table: Optional[Table],
target_table: Optional[Table],
) -> ColumnElement[Any]:
if source_table is None or target_table is None:
return expression
fixed_source_table = source_table
fixed_target_table = target_table
def replace(
element: ExternallyTraversible, **kw: Any
) -> Optional[ExternallyTraversible]:
if (
isinstance(element, Column)
and element.table is fixed_source_table
and element.key in fixed_source_table.c
):
return fixed_target_table.c[element.key]
else:
return None
return cast(
ColumnElement[Any],
visitors.replacement_traverse(expression, {}, replace),
)
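# Hedged illustration (editor's addition): given an expression bound to one
# table, such as func.lower(old_tbl.c.name), _copy_expression(expr, old_tbl,
# new_tbl) returns the same tree with new_tbl.c.name swapped in; this is how
# Table.to_metadata() retargets expression-based indexes further below.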
@inspection._self_inspects
class SchemaItem(SchemaEventTarget, visitors.Visitable):
"""Base class for items that define a database schema."""
__visit_name__ = "schema_item"
create_drop_stringify_dialect = "default"
def _init_items(self, *args: SchemaItem, **kw: Any) -> None:
"""Initialize the list of child items for this SchemaItem."""
for item in args:
if item is not None:
try:
spwd = item._set_parent_with_dispatch
except AttributeError as err:
raise exc.ArgumentError(
"'SchemaItem' object, such as a 'Column' or a "
"'Constraint' expected, got %r" % item
) from err
else:
spwd(self, **kw)
def __repr__(self) -> str:
return util.generic_repr(self, omit_kwarg=["info"])
@util.memoized_property
def info(self) -> _InfoType:
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.SchemaItem`.
The dictionary is automatically generated when first accessed.
It can also be specified in the constructor of some objects,
such as :class:`_schema.Table` and :class:`_schema.Column`.
"""
return {}
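    # Hedged usage sketch (editor's addition): .info is a free-form scratchpad
    # ignored by DDL generation, e.g.
    #   t = Table("t", MetaData(), Column("id", Integer, primary_key=True))
    #   t.info["owner"] = "etl-team"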
def _schema_item_copy(self, schema_item: _SI) -> _SI:
if "info" in self.__dict__:
schema_item.info = self.info.copy()
schema_item.dispatch._update(self.dispatch)
return schema_item
_use_schema_map = True
SelfHasConditionalDDL = TypeVar(
"SelfHasConditionalDDL", bound="HasConditionalDDL"
)
class HasConditionalDDL:
"""define a class that includes the :meth:`.HasConditionalDDL.ddl_if`
method, allowing for conditional rendering of DDL.
Currently applies to constraints and indexes.
.. versionadded:: 2.0
"""
_ddl_if: Optional[ddl.DDLIf] = None
def ddl_if(
self: SelfHasConditionalDDL,
dialect: Optional[str] = None,
callable_: Optional[ddl.DDLIfCallable] = None,
state: Optional[Any] = None,
) -> SelfHasConditionalDDL:
r"""apply a conditional DDL rule to this schema item.
These rules work in a similar manner to the
:meth:`.ExecutableDDLElement.execute_if` callable, with the added
feature that the criteria may be checked within the DDL compilation
phase for a construct such as :class:`.CreateTable`.
:meth:`.HasConditionalDDL.ddl_if` currently applies towards the
:class:`.Index` construct as well as all :class:`.Constraint`
constructs.
:param dialect: string name of a dialect, or a tuple of string names
to indicate multiple dialect types.
:param callable\_: a callable that is constructed using the same form
as that described in
:paramref:`.ExecutableDDLElement.execute_if.callable_`.
:param state: any arbitrary object that will be passed to the
callable, if present.
.. versionadded:: 2.0
.. seealso::
:ref:`schema_ddl_ddl_if` - background and usage examples
"""
self._ddl_if = ddl.DDLIf(dialect, callable_, state)
return self
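# Hedged usage sketch (editor's addition): emit an index only on PostgreSQL
# (assumes ``func`` from sqlalchemy and an existing Table ``tbl``):
#   idx = Index("ix_data_lower", func.lower(tbl.c.data)).ddl_if(
#       dialect="postgresql"
#   )
# metadata.create_all() then renders the CREATE INDEX only for that dialect.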
class HasSchemaAttr(SchemaItem):
"""schema item that includes a top-level schema name"""
schema: Optional[str]
class Table(
DialectKWArgs, HasSchemaAttr, TableClause, inspection.Inspectable["Table"]
):
r"""Represent a table in a database.
e.g.::
mytable = Table(
"mytable", metadata,
Column('mytable_id', Integer, primary_key=True),
Column('value', String(50))
)
The :class:`_schema.Table`
object constructs a unique instance of itself based
on its name and optional schema name within the given
:class:`_schema.MetaData` object. Calling the :class:`_schema.Table`
constructor with the same name and same :class:`_schema.MetaData` argument
a second time will return the *same* :class:`_schema.Table`
object - in this way
the :class:`_schema.Table` constructor acts as a registry function.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
"""
__visit_name__ = "table"
if TYPE_CHECKING:
@util.ro_non_memoized_property
def primary_key(self) -> PrimaryKeyConstraint:
...
@util.ro_non_memoized_property
def foreign_keys(self) -> Set[ForeignKey]:
...
_columns: DedupeColumnCollection[Column[Any]]
constraints: Set[Constraint]
"""A collection of all :class:`_schema.Constraint` objects associated with
this :class:`_schema.Table`.
Includes :class:`_schema.PrimaryKeyConstraint`,
:class:`_schema.ForeignKeyConstraint`, :class:`_schema.UniqueConstraint`,
:class:`_schema.CheckConstraint`. A separate collection
:attr:`_schema.Table.foreign_key_constraints` refers to the collection
of all :class:`_schema.ForeignKeyConstraint` objects, and the
:attr:`_schema.Table.primary_key` attribute refers to the single
:class:`_schema.PrimaryKeyConstraint` associated with the
:class:`_schema.Table`.
.. seealso::
:attr:`_schema.Table.constraints`
:attr:`_schema.Table.primary_key`
:attr:`_schema.Table.foreign_key_constraints`
:attr:`_schema.Table.indexes`
:class:`_reflection.Inspector`
"""
indexes: Set[Index]
"""A collection of all :class:`_schema.Index` objects associated with this
:class:`_schema.Table`.
.. seealso::
:meth:`_reflection.Inspector.get_indexes`
"""
_traverse_internals: _TraverseInternalsType = (
TableClause._traverse_internals
+ [("schema", InternalTraversal.dp_string)]
)
if TYPE_CHECKING:
@util.ro_non_memoized_property
def columns(self) -> ReadOnlyColumnCollection[str, Column[Any]]:
...
@util.ro_non_memoized_property
def exported_columns(
self,
) -> ReadOnlyColumnCollection[str, Column[Any]]:
...
@util.ro_non_memoized_property
def c(self) -> ReadOnlyColumnCollection[str, Column[Any]]:
...
def _gen_cache_key(
self, anon_map: anon_map, bindparams: List[BindParameter[Any]]
) -> Tuple[Any, ...]:
if self._annotations:
return (self,) + self._annotations_cache_key
else:
return (self,)
if not typing.TYPE_CHECKING:
# typing tools seem to be inconsistent in how they handle
# __new__, so suggest this pattern for classes that use
# __new__. apply typing to the __init__ method normally
@util.deprecated_params(
mustexist=(
"1.4",
"Deprecated alias of :paramref:`_schema.Table.must_exist`",
),
)
def __new__(cls, *args: Any, **kw: Any) -> Any:
return cls._new(*args, **kw)
@classmethod
def _new(cls, *args: Any, **kw: Any) -> Any:
if not args and not kw:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except IndexError:
raise TypeError(
"Table() takes at least two positional-only "
"arguments 'name' and 'metadata'"
)
schema = kw.get("schema", None)
if schema is None:
schema = metadata.schema
elif schema is BLANK_SCHEMA:
schema = None
keep_existing = kw.get("keep_existing", False)
extend_existing = kw.get("extend_existing", False)
if keep_existing and extend_existing:
msg = "keep_existing and extend_existing are mutually exclusive."
raise exc.ArgumentError(msg)
must_exist = kw.pop("must_exist", kw.pop("mustexist", False))
key = _get_table_key(name, schema)
if key in metadata.tables:
if not keep_existing and not extend_existing and bool(args):
raise exc.InvalidRequestError(
"Table '%s' is already defined for this MetaData "
"instance. Specify 'extend_existing=True' "
"to redefine "
"options and columns on an "
"existing Table object." % key
)
table = metadata.tables[key]
if extend_existing:
table._init_existing(*args, **kw)
return table
else:
if must_exist:
raise exc.InvalidRequestError("Table '%s' not defined" % (key))
table = object.__new__(cls)
table.dispatch.before_parent_attach(table, metadata)
metadata._add_table(name, schema, table)
try:
table.__init__(name, metadata, *args, _no_init=False, **kw)
table.dispatch.after_parent_attach(table, metadata)
return table
except Exception:
with util.safe_reraise():
metadata._remove_table(name, schema)
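    # Hedged illustration (editor's addition) of the registry logic above:
    #   m = MetaData()
    #   t1 = Table("user", m, Column("id", Integer, primary_key=True))
    #   t2 = Table("user", m)                      # same object: t1 is t2
    #   Table("user", m, Column("x", Integer))     # raises InvalidRequestError
    #   Table("user", m, Column("x", Integer),
    #         extend_existing=True)                # OK, augments t1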
def __init__(
self,
name: str,
metadata: MetaData,
*args: SchemaItem,
schema: Optional[Union[str, Literal[SchemaConst.BLANK_SCHEMA]]] = None,
quote: Optional[bool] = None,
quote_schema: Optional[bool] = None,
autoload_with: Optional[Union[Engine, Connection]] = None,
autoload_replace: bool = True,
keep_existing: bool = False,
extend_existing: bool = False,
resolve_fks: bool = True,
include_columns: Optional[Collection[str]] = None,
implicit_returning: bool = True,
comment: Optional[str] = None,
info: Optional[Dict[Any, Any]] = None,
listeners: Optional[
_typing_Sequence[Tuple[str, Callable[..., Any]]]
] = None,
prefixes: Optional[_typing_Sequence[str]] = None,
# used internally in the metadata.reflect() process
_extend_on: Optional[Set[Table]] = None,
# used by __new__ to bypass __init__
_no_init: bool = True,
# dialect-specific keyword args
**kw: Any,
) -> None:
r"""Constructor for :class:`_schema.Table`.
:param name: The name of this table as represented in the database.
The table name, along with the value of the ``schema`` parameter,
forms a key which uniquely identifies this :class:`_schema.Table`
within
the owning :class:`_schema.MetaData` collection.
Additional calls to :class:`_schema.Table` with the same name,
metadata,
and schema name will return the same :class:`_schema.Table` object.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word or contain special characters.
A name with any number of upper case characters is considered
to be case sensitive, and will be sent as quoted.
To enable unconditional quoting for the table name, specify the flag
``quote=True`` to the constructor, or use the :class:`.quoted_name`
construct to specify the name.
:param metadata: a :class:`_schema.MetaData`
object which will contain this
table. The metadata is used as a point of association of this table
with other tables which are referenced via foreign key. It also
may be used to associate this table with a particular
:class:`.Connection` or :class:`.Engine`.
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`_schema.Column`
objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
:class:`.SchemaItem` constructs may be added here, including
:class:`.PrimaryKeyConstraint`, and
:class:`_schema.ForeignKeyConstraint`.
:param autoload_replace: Defaults to ``True``; when using
:paramref:`_schema.Table.autoload_with`
in conjunction with :paramref:`_schema.Table.extend_existing`,
indicates
that :class:`_schema.Column` objects present in the already-existing
:class:`_schema.Table`
object should be replaced with columns of the same
name retrieved from the autoload process. When ``False``, columns
already present under existing names will be omitted from the
reflection process.
Note that this setting does not impact :class:`_schema.Column` objects
specified programmatically within the call to :class:`_schema.Table`
that
also is autoloading; those :class:`_schema.Column` objects will always
replace existing columns of the same name when
:paramref:`_schema.Table.extend_existing` is ``True``.
.. seealso::
:paramref:`_schema.Table.autoload_with`
:paramref:`_schema.Table.extend_existing`
:param autoload_with: An :class:`_engine.Engine` or
:class:`_engine.Connection` object,
or a :class:`_reflection.Inspector` object as returned by
:func:`_sa.inspect`
against one, with which this :class:`_schema.Table`
object will be reflected.
When set to a non-None value, the autoload process will take place
for this table against the given engine or connection.
:param extend_existing: When ``True``, indicates that if this
:class:`_schema.Table` is already present in the given
:class:`_schema.MetaData`,
apply further arguments within the constructor to the existing
:class:`_schema.Table`.
If :paramref:`_schema.Table.extend_existing` or
:paramref:`_schema.Table.keep_existing` are not set,
and the given name
of the new :class:`_schema.Table` refers to a :class:`_schema.Table`
that is
already present in the target :class:`_schema.MetaData` collection,
and
this :class:`_schema.Table`
specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a
:class:`_schema.Table`
is specified that matches an existing :class:`_schema.Table`,
yet specifies
additional constructs.
:paramref:`_schema.Table.extend_existing`
will also work in conjunction
with :paramref:`_schema.Table.autoload_with` to run a new reflection
operation against the database, even if a :class:`_schema.Table`
of the same name is already present in the target
:class:`_schema.MetaData`; newly reflected :class:`_schema.Column`
objects
and other options will be added into the state of the
:class:`_schema.Table`, potentially overwriting existing columns
and options of the same name.
As is always the case with :paramref:`_schema.Table.autoload_with`,
:class:`_schema.Column` objects can be specified in the same
:class:`_schema.Table`
constructor, which will take precedence. Below, the existing
table ``mytable`` will be augmented with :class:`_schema.Column`
objects
both reflected from the database, as well as the given
:class:`_schema.Column`
named "y"::
Table("mytable", metadata,
Column('y', Integer),
extend_existing=True,
autoload_with=engine
)
.. seealso::
:paramref:`_schema.Table.autoload_with`
:paramref:`_schema.Table.autoload_replace`
:paramref:`_schema.Table.keep_existing`
:param implicit_returning: True by default - indicates that
RETURNING can be used, typically by the ORM, in order to fetch
server-generated values such as primary key values and
server side defaults, on those backends which support RETURNING.
In modern SQLAlchemy there is generally no reason to alter this
setting, except for some backend specific cases
(see :ref:`mssql_triggers` in the SQL Server dialect documentation
for one such example).
:param include_columns: A list of strings indicating a subset of
            columns to be loaded via the ``autoload`` operation; table columns which
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all columns
should be reflected.
:param resolve_fks: Whether or not to reflect :class:`_schema.Table`
objects
related to this one via :class:`_schema.ForeignKey` objects, when
:paramref:`_schema.Table.autoload_with` is
specified. Defaults to True. Set to False to disable reflection of
related tables as :class:`_schema.ForeignKey`
objects are encountered; may be
used either to save on SQL calls or to avoid issues with related tables
that can't be accessed. Note that if a related table is already present
in the :class:`_schema.MetaData` collection, or becomes present later,
a
:class:`_schema.ForeignKey` object associated with this
:class:`_schema.Table` will
resolve to that table normally.
.. versionadded:: 1.3
.. seealso::
:paramref:`.MetaData.reflect.resolve_fks`
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param keep_existing: When ``True``, indicates that if this Table
is already present in the given :class:`_schema.MetaData`, ignore
further arguments within the constructor to the existing
:class:`_schema.Table`, and return the :class:`_schema.Table`
object as
originally created. This is to allow a function that wishes
to define a new :class:`_schema.Table` on first call, but on
subsequent calls will return the same :class:`_schema.Table`,
without any of the declarations (particularly constraints)
being applied a second time.
If :paramref:`_schema.Table.extend_existing` or
:paramref:`_schema.Table.keep_existing` are not set,
and the given name
of the new :class:`_schema.Table` refers to a :class:`_schema.Table`
that is
already present in the target :class:`_schema.MetaData` collection,
and
this :class:`_schema.Table`
specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a
:class:`_schema.Table`
is specified that matches an existing :class:`_schema.Table`,
yet specifies
additional constructs.
.. seealso::
:paramref:`_schema.Table.extend_existing`
:param listeners: A list of tuples of the form ``(<eventname>, <fn>)``
which will be passed to :func:`.event.listen` upon construction.
This alternate hook to :func:`.event.listen` allows the establishment
of a listener function specific to this :class:`_schema.Table` before
the "autoload" process begins. Historically this has been intended
for use with the :meth:`.DDLEvents.column_reflect` event, however
note that this event hook may now be associated with the
:class:`_schema.MetaData` object directly::
def listen_for_reflect(table, column_info):
"handle the column reflection event"
# ...
t = Table(
'sometable',
autoload_with=engine,
listeners=[
('column_reflect', listen_for_reflect)
])
.. seealso::
:meth:`_events.DDLEvents.column_reflect`
:param must_exist: When ``True``, indicates that this Table must already
be present in the given :class:`_schema.MetaData` collection, else
an exception is raised.
:param prefixes:
A list of strings to insert after CREATE in the CREATE TABLE
statement. They will be separated by spaces.
:param quote: Force quoting of this table's name on or off, corresponding
to ``True`` or ``False``. When left at its default of ``None``,
the column identifier will be quoted according to whether the name is
case sensitive (identifiers with at least one upper case character are
treated as case sensitive), or if it's a reserved word. This flag
is only needed to force quoting of a reserved word which is not known
by the SQLAlchemy dialect.
.. note:: setting this flag to ``False`` will not provide
case-insensitive behavior for table reflection; table reflection
will always search for a mixed-case name in a case sensitive
fashion. Case insensitive names are specified in SQLAlchemy only
by stating the name with all lower case characters.
:param quote_schema: same as 'quote' but applies to the schema identifier.
:param schema: The schema name for this table, which is required if
the table resides in a schema other than the default selected schema
for the engine's database connection. Defaults to ``None``.
If the owning :class:`_schema.MetaData` of this :class:`_schema.Table`
specifies its
own :paramref:`_schema.MetaData.schema` parameter,
then that schema name will
be applied to this :class:`_schema.Table`
if the schema parameter here is set
to ``None``. To set a blank schema name on a :class:`_schema.Table`
that
would otherwise use the schema set on the owning
:class:`_schema.MetaData`,
specify the special symbol :attr:`.BLANK_SCHEMA`.
.. versionadded:: 1.0.14 Added the :attr:`.BLANK_SCHEMA` symbol to
allow a :class:`_schema.Table`
to have a blank schema name even when the
parent :class:`_schema.MetaData` specifies
:paramref:`_schema.MetaData.schema`.
The quoting rules for the schema name are the same as those for the
``name`` parameter, in that quoting is applied for reserved words or
case-sensitive names; to enable unconditional quoting for the schema
name, specify the flag ``quote_schema=True`` to the constructor, or use
the :class:`.quoted_name` construct to specify the name.
:param comment: Optional string that will render an SQL comment on table
creation.
.. versionadded:: 1.2 Added the :paramref:`_schema.Table.comment`
parameter
to :class:`_schema.Table`.
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form ``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
""" # noqa: E501
if _no_init:
# don't run __init__ from __new__ by default;
# __new__ has a specific place that __init__ is called
return
super().__init__(quoted_name(name, quote))
self.metadata = metadata
if schema is None:
self.schema = metadata.schema
elif schema is BLANK_SCHEMA:
self.schema = None
else:
assert isinstance(schema, str)
self.schema = quoted_name(schema, quote_schema)
self.indexes = set()
self.constraints = set()
PrimaryKeyConstraint(
_implicit_generated=True
)._set_parent_with_dispatch(self)
self.foreign_keys = set() # type: ignore
self._extra_dependencies: Set[Table] = set()
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
self.implicit_returning = implicit_returning
_reflect_info = kw.pop("_reflect_info", None)
self.comment = comment
if info is not None:
self.info = info
if listeners is not None:
for evt, fn in listeners:
event.listen(self, evt, fn)
self._prefixes = prefixes if prefixes else []
self._extra_kwargs(**kw)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload_with is not None:
self._autoload(
metadata,
autoload_with,
include_columns,
_extend_on=_extend_on,
_reflect_info=_reflect_info,
resolve_fks=resolve_fks,
)
# initialize all the column, etc. objects. done after reflection to
# allow user-overrides
self._init_items(
*args,
allow_replacements=extend_existing
or keep_existing
or autoload_with,
)
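    # Hedged usage sketch (editor's addition) for the autoload path above
    # (assumes a working Engine named ``engine``):
    #   users = Table("users", metadata, autoload_with=engine)
    #   # columns given inline take precedence over reflected ones:
    #   posts = Table("posts", metadata, Column("body", Text),
    #                 autoload_with=engine)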
def _autoload(
self,
metadata: MetaData,
autoload_with: Union[Engine, Connection],
include_columns: Optional[Collection[str]],
exclude_columns: Collection[str] = (),
resolve_fks: bool = True,
_extend_on: Optional[Set[Table]] = None,
_reflect_info: _ReflectionInfo | None = None,
) -> None:
insp = inspection.inspect(autoload_with)
with insp._inspection_context() as conn_insp:
conn_insp.reflect_table(
self,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
_reflect_info=_reflect_info,
)
@property
def _sorted_constraints(self) -> List[Constraint]:
"""Return the set of constraints as a list, sorted by creation
order.
"""
return sorted(self.constraints, key=lambda c: c._creation_order)
@property
def foreign_key_constraints(self) -> Set[ForeignKeyConstraint]:
""":class:`_schema.ForeignKeyConstraint` objects referred to by this
:class:`_schema.Table`.
This list is produced from the collection of
:class:`_schema.ForeignKey`
objects currently associated.
.. seealso::
:attr:`_schema.Table.constraints`
:attr:`_schema.Table.foreign_keys`
:attr:`_schema.Table.indexes`
"""
return {
fkc.constraint
for fkc in self.foreign_keys
if fkc.constraint is not None
}
def _init_existing(self, *args: Any, **kwargs: Any) -> None:
autoload_with = kwargs.pop("autoload_with", None)
autoload = kwargs.pop("autoload", autoload_with is not None)
autoload_replace = kwargs.pop("autoload_replace", True)
schema = kwargs.pop("schema", None)
_extend_on = kwargs.pop("_extend_on", None)
_reflect_info = kwargs.pop("_reflect_info", None)
# these arguments are only used with _init()
kwargs.pop("extend_existing", False)
kwargs.pop("keep_existing", False)
        if schema and schema != self.schema:
            raise exc.ArgumentError(
                "Can't change schema of existing table from '%s' to '%s'"
                % (self.schema, schema)
            )
include_columns = kwargs.pop("include_columns", None)
if include_columns is not None:
for c in self.c:
if c.name not in include_columns:
self._columns.remove(c)
resolve_fks = kwargs.pop("resolve_fks", True)
for key in ("quote", "quote_schema"):
if key in kwargs:
raise exc.ArgumentError(
"Can't redefine 'quote' or 'quote_schema' arguments"
)
# update `self` with these kwargs, if provided
self.comment = kwargs.pop("comment", self.comment)
self.implicit_returning = kwargs.pop(
"implicit_returning", self.implicit_returning
)
self.info = kwargs.pop("info", self.info)
exclude_columns: _typing_Sequence[str]
if autoload:
if not autoload_replace:
# don't replace columns already present.
# we'd like to do this for constraints also however we don't
# have simple de-duping for unnamed constraints.
exclude_columns = [c.name for c in self.c]
else:
exclude_columns = ()
self._autoload(
self.metadata,
autoload_with,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
_reflect_info=_reflect_info,
)
self._extra_kwargs(**kwargs)
self._init_items(*args)
def _extra_kwargs(self, **kwargs: Any) -> None:
self._validate_dialect_kwargs(kwargs)
def _init_collections(self) -> None:
pass
def _reset_exported(self) -> None:
pass
@util.ro_non_memoized_property
def _autoincrement_column(self) -> Optional[Column[Any]]:
return self.primary_key._autoincrement_column
@property
def key(self) -> str:
"""Return the 'key' for this :class:`_schema.Table`.
This value is used as the dictionary key within the
:attr:`_schema.MetaData.tables` collection. It is typically the same
as that of :attr:`_schema.Table.name` for a table with no
:attr:`_schema.Table.schema`
set; otherwise it is typically of the form
``schemaname.tablename``.
"""
return _get_table_key(self.name, self.schema)
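    # Hedged example (editor's addition): the key indexes MetaData.tables.
    #   t = Table("orders", metadata, schema="sales")
    #   assert t.key == "sales.orders"
    #   assert metadata.tables["sales.orders"] is t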
def __repr__(self) -> str:
return "Table(%s)" % ", ".join(
[repr(self.name)]
+ [repr(self.metadata)]
+ [repr(x) for x in self.columns]
+ ["%s=%s" % (k, repr(getattr(self, k))) for k in ["schema"]]
)
def __str__(self) -> str:
return _get_table_key(self.description, self.schema)
def add_is_dependent_on(self, table: Table) -> None:
"""Add a 'dependency' for this Table.
This is another Table object which must be created
first before this one can, or dropped after this one.
Usually, dependencies between tables are determined via
ForeignKey objects. However, for other situations that
create dependencies outside of foreign keys (rules, inheriting),
this method can manually establish such a link.
"""
self._extra_dependencies.add(table)
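    # Hedged usage sketch (editor's addition): force ``audit`` to be created
    # after ``accounts`` even though no ForeignKey links them:
    #   audit.add_is_dependent_on(accounts)
    #   metadata.create_all(engine)   # accounts is now created before audit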
def append_column(
self, column: ColumnClause[Any], replace_existing: bool = False
) -> None:
"""Append a :class:`_schema.Column` to this :class:`_schema.Table`.
The "key" of the newly added :class:`_schema.Column`, i.e. the
value of its ``.key`` attribute, will then be available
in the ``.c`` collection of this :class:`_schema.Table`, and the
column definition will be included in any CREATE TABLE, SELECT,
UPDATE, etc. statements generated from this :class:`_schema.Table`
construct.
Note that this does **not** change the definition of the table
as it exists within any underlying database, assuming that
table has already been created in the database. Relational
databases support the addition of columns to existing tables
using the SQL ALTER command, which would need to be
emitted for an already-existing table that doesn't contain
the newly added column.
        :param replace_existing: When ``True``, allows replacing existing
            columns. When ``False``, the default, a warning is raised
            if a column with the same ``.key`` already exists; a future
            version of SQLAlchemy will raise an error instead.
.. versionadded:: 1.4.0
"""
column._set_parent_with_dispatch(
self, allow_replacements=replace_existing
)
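    # Hedged usage sketch (editor's addition):
    #   t.append_column(Column("created_at", DateTime,
    #                          server_default=func.now()))
    #   assert "created_at" in t.c
    # Emitting DDL for a table that already exists still requires ALTER TABLE.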
def append_constraint(self, constraint: Union[Index, Constraint]) -> None:
"""Append a :class:`_schema.Constraint` to this
:class:`_schema.Table`.
This has the effect of the constraint being included in any
future CREATE TABLE statement, assuming specific DDL creation
events have not been associated with the given
:class:`_schema.Constraint` object.
Note that this does **not** produce the constraint within the
relational database automatically, for a table that already exists
in the database. To add a constraint to an
existing relational database table, the SQL ALTER command must
be used. SQLAlchemy also provides the
:class:`.AddConstraint` construct which can produce this SQL when
invoked as an executable clause.
"""
constraint._set_parent_with_dispatch(self)
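    # Hedged usage sketch (editor's addition), pairing with AddConstraint to
    # emit the ALTER for an already-created table (assumes a Connection
    # ``conn``):
    #   uq = UniqueConstraint("email", name="uq_user_email")
    #   t.append_constraint(uq)
    #   from sqlalchemy.schema import AddConstraint
    #   conn.execute(AddConstraint(uq))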
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
metadata = parent
assert isinstance(metadata, MetaData)
metadata._add_table(self.name, self.schema, self)
self.metadata = metadata
def create(self, bind: _CreateDropBind, checkfirst: bool = False) -> None:
"""Issue a ``CREATE`` statement for this
:class:`_schema.Table`, using the given
:class:`.Connection` or :class:`.Engine`
for connectivity.
.. seealso::
:meth:`_schema.MetaData.create_all`.
"""
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(self, bind: _CreateDropBind, checkfirst: bool = False) -> None:
"""Issue a ``DROP`` statement for this
:class:`_schema.Table`, using the given
:class:`.Connection` or :class:`.Engine` for connectivity.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
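    # Hedged usage sketch (editor's addition):
    #   t.create(engine, checkfirst=True)   # skips CREATE if table exists
    #   t.drop(engine, checkfirst=True)     # skips DROP if table is absent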
@util.deprecated(
"1.4",
":meth:`_schema.Table.tometadata` is renamed to "
":meth:`_schema.Table.to_metadata`",
)
def tometadata(
self,
metadata: MetaData,
schema: Union[str, Literal[SchemaConst.RETAIN_SCHEMA]] = RETAIN_SCHEMA,
referred_schema_fn: Optional[
Callable[
[Table, Optional[str], ForeignKeyConstraint, Optional[str]],
Optional[str],
]
] = None,
name: Optional[str] = None,
) -> Table:
"""Return a copy of this :class:`_schema.Table`
associated with a different
:class:`_schema.MetaData`.
See :meth:`_schema.Table.to_metadata` for a full description.
"""
return self.to_metadata(
metadata,
schema=schema,
referred_schema_fn=referred_schema_fn,
name=name,
)
def to_metadata(
self,
metadata: MetaData,
schema: Union[str, Literal[SchemaConst.RETAIN_SCHEMA]] = RETAIN_SCHEMA,
referred_schema_fn: Optional[
Callable[
[Table, Optional[str], ForeignKeyConstraint, Optional[str]],
Optional[str],
]
] = None,
name: Optional[str] = None,
) -> Table:
"""Return a copy of this :class:`_schema.Table` associated with a
different :class:`_schema.MetaData`.
E.g.::
m1 = MetaData()
user = Table('user', m1, Column('id', Integer, primary_key=True))
m2 = MetaData()
user_copy = user.to_metadata(m2)
.. versionchanged:: 1.4 The :meth:`_schema.Table.to_metadata` function
was renamed from :meth:`_schema.Table.tometadata`.
:param metadata: Target :class:`_schema.MetaData` object,
into which the
new :class:`_schema.Table` object will be created.
:param schema: optional string name indicating the target schema.
Defaults to the special symbol :attr:`.RETAIN_SCHEMA` which indicates
that no change to the schema name should be made in the new
:class:`_schema.Table`. If set to a string name, the new
:class:`_schema.Table`
will have this new name as the ``.schema``. If set to ``None``, the
schema will be set to that of the schema set on the target
:class:`_schema.MetaData`, which is typically ``None`` as well,
unless
set explicitly::
m2 = MetaData(schema='newschema')
# user_copy_one will have "newschema" as the schema name
user_copy_one = user.to_metadata(m2, schema=None)
m3 = MetaData() # schema defaults to None
# user_copy_two will have None as the schema name
user_copy_two = user.to_metadata(m3, schema=None)
:param referred_schema_fn: optional callable which can be supplied
in order to provide for the schema name that should be assigned
to the referenced table of a :class:`_schema.ForeignKeyConstraint`.
The callable accepts this parent :class:`_schema.Table`, the
target schema that we are changing to, the
:class:`_schema.ForeignKeyConstraint` object, and the existing
"target schema" of that constraint. The function should return the
string schema name that should be applied. To reset the schema
to "none", return the symbol :data:`.BLANK_SCHEMA`. To effect no
change, return ``None`` or :data:`.RETAIN_SCHEMA`.
.. versionchanged:: 1.4.33 The ``referred_schema_fn`` function
may return the :data:`.BLANK_SCHEMA` or :data:`.RETAIN_SCHEMA`
symbols.
E.g.::
def referred_schema_fn(table, to_schema,
constraint, referred_schema):
if referred_schema == 'base_tables':
return referred_schema
else:
return to_schema
new_table = table.to_metadata(m2, schema="alt_schema",
referred_schema_fn=referred_schema_fn)
.. versionadded:: 0.9.2
:param name: optional string name indicating the target table name.
If not specified or None, the table name is retained. This allows
a :class:`_schema.Table` to be copied to the same
:class:`_schema.MetaData` target
with a new name.
.. versionadded:: 1.0.0
"""
if name is None:
name = self.name
actual_schema: Optional[str]
if schema is RETAIN_SCHEMA:
actual_schema = self.schema
elif schema is None:
actual_schema = metadata.schema
else:
actual_schema = schema # type: ignore
key = _get_table_key(name, actual_schema)
if key in metadata.tables:
util.warn(
"Table '%s' already exists within the given "
"MetaData - not copying." % self.description
)
return metadata.tables[key]
args = []
for col in self.columns:
args.append(col._copy(schema=actual_schema))
table = Table(
name,
metadata,
schema=actual_schema,
comment=self.comment,
*args,
**self.kwargs,
)
for const in self.constraints:
if isinstance(const, ForeignKeyConstraint):
referred_schema = const._referred_schema
if referred_schema_fn:
fk_constraint_schema = referred_schema_fn(
self, actual_schema, const, referred_schema
)
else:
fk_constraint_schema = (
actual_schema
if referred_schema == self.schema
else None
)
table.append_constraint(
const._copy(
schema=fk_constraint_schema, target_table=table
)
)
elif not const._type_bound:
# skip unique constraints that would be generated
# by the 'unique' flag on Column
if const._column_flag:
continue
table.append_constraint(
const._copy(schema=actual_schema, target_table=table)
)
for index in self.indexes:
# skip indexes that would be generated
# by the 'index' flag on Column
if index._column_flag:
continue
Index(
index.name,
unique=index.unique,
*[
_copy_expression(expr, self, table)
for expr in index._table_bound_expressions
],
_table=table,
**index.kwargs,
)
return self._schema_item_copy(table)
class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
"""Represents a column in a database table."""
__visit_name__ = "column"
inherit_cache = True
key: str
def __init__(
self,
__name_pos: Optional[
Union[str, _TypeEngineArgument[_T], SchemaEventTarget]
] = None,
__type_pos: Optional[
Union[_TypeEngineArgument[_T], SchemaEventTarget]
] = None,
*args: SchemaEventTarget,
name: Optional[str] = None,
type_: Optional[_TypeEngineArgument[_T]] = None,
autoincrement: Union[bool, Literal["auto", "ignore_fk"]] = "auto",
default: Optional[Any] = None,
doc: Optional[str] = None,
key: Optional[str] = None,
index: Optional[bool] = None,
unique: Optional[bool] = None,
info: Optional[_InfoType] = None,
nullable: Optional[
Union[bool, Literal[SchemaConst.NULL_UNSPECIFIED]]
] = SchemaConst.NULL_UNSPECIFIED,
onupdate: Optional[Any] = None,
primary_key: bool = False,
server_default: Optional[_ServerDefaultType] = None,
server_onupdate: Optional[FetchedValue] = None,
quote: Optional[bool] = None,
system: bool = False,
comment: Optional[str] = None,
_proxies: Optional[Any] = None,
**dialect_kwargs: Any,
):
r"""
Construct a new ``Column`` object.
:param name: The name of this column as represented in the database.
This argument may be the first positional argument, or specified
via keyword.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
The name field may be omitted at construction time and applied
later, at any time before the Column is associated with a
:class:`_schema.Table`. This is to support convenient
usage within the :mod:`~sqlalchemy.ext.declarative` extension.
:param type\_: The column's type, indicated using an instance which
subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments
are required for the type, the class of the type can be sent
as well, e.g.::
# use a type with arguments
Column('data', String(50))
# use no arguments
Column('level', Integer)
The ``type`` argument may be the second positional argument
or specified by keyword.
If the ``type`` is ``None`` or is omitted, it will first default to
the special type :class:`.NullType`. If and when this
:class:`_schema.Column` is made to refer to another column using
:class:`_schema.ForeignKey` and/or
:class:`_schema.ForeignKeyConstraint`, the type
of the remote-referenced column will be copied to this column as
well, at the moment that the foreign key is resolved against that
remote :class:`_schema.Column` object.
.. versionchanged:: 0.9.0
Support for propagation of type to a :class:`_schema.Column`
from its
:class:`_schema.ForeignKey` object has been improved and should be
more reliable and timely.
:param \*args: Additional positional arguments include various
:class:`.SchemaItem` derived constructs which will be applied
as options to the column. These include instances of
:class:`.Constraint`, :class:`_schema.ForeignKey`,
:class:`.ColumnDefault`, :class:`.Sequence`, :class:`.Computed`
:class:`.Identity`. In some cases an
equivalent keyword argument is available such as ``server_default``,
``default`` and ``unique``.
:param autoincrement: Set up "auto increment" semantics for an
**integer primary key column with no foreign key dependencies**
(see later in this docstring for a more specific definition).
This may influence the :term:`DDL` that will be emitted for
this column during a table create, as well as how the column
will be considered when INSERT statements are compiled and
executed.
The default value is the string ``"auto"``,
which indicates that a single-column (i.e. non-composite) primary key
that is of an INTEGER type with no other client-side or server-side
default constructs indicated should receive auto increment semantics
automatically. Other values include ``True`` (force this column to
have auto-increment semantics for a :term:`composite primary key` as
well), ``False`` (this column should never have auto-increment
semantics), and the string ``"ignore_fk"`` (special-case for foreign
key columns, see below).
The term "auto increment semantics" refers both to the kind of DDL
that will be emitted for the column within a CREATE TABLE statement,
when methods such as :meth:`.MetaData.create_all` and
:meth:`.Table.create` are invoked, as well as how the column will be
considered when an INSERT statement is compiled and emitted to the
database:
* **DDL rendering** (i.e. :meth:`.MetaData.create_all`,
:meth:`.Table.create`): When used on a :class:`.Column` that has
no other
default-generating construct associated with it (such as a
:class:`.Sequence` or :class:`.Identity` construct), the parameter
will imply that database-specific keywords such as PostgreSQL
``SERIAL``, MySQL ``AUTO_INCREMENT``, or ``IDENTITY`` on SQL Server
should also be rendered. Not every database backend has an
"implied" default generator available; for example the Oracle
backend always needs an explicit construct such as
:class:`.Identity` to be included with a :class:`.Column` in order
for the DDL rendered to include auto-generating constructs to also
be produced in the database.
* **INSERT semantics** (i.e. when a :func:`_sql.insert` construct is
compiled into a SQL string and is then executed on a database using
:meth:`_engine.Connection.execute` or equivalent): A single-row
INSERT statement will be known to produce a new integer primary key
value automatically for this column, which will be accessible
after the statement is invoked via the
:attr:`.CursorResult.inserted_primary_key` attribute upon the
:class:`_result.Result` object. This also applies towards use of the
ORM when ORM-mapped objects are persisted to the database,
indicating that a new integer primary key will be available to
become part of the :term:`identity key` for that object. This
behavior takes place regardless of what DDL constructs are
associated with the :class:`_schema.Column` and is independent
of the "DDL Rendering" behavior discussed in the previous note
above.
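For example, a sketch of retrieving the new primary key after a
single-row INSERT (``conn`` and ``table`` are assumed to already
exist)::

    result = conn.execute(table.insert().values(name="widget"))
    new_pk = result.inserted_primary_key  # e.g. (5,)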
The parameter may be set to ``True`` to indicate that a column which
is part of a composite (i.e. multi-column) primary key should
have autoincrement semantics, though note that only one column
within a primary key may have this setting. It can also
be set to ``True`` to indicate autoincrement semantics on a
column that has a client-side or server-side default configured,
however note that not all dialects can accommodate all styles
of default as an "autoincrement". It can also be
set to ``False`` on a single-column primary key that has a
datatype of INTEGER in order to disable auto increment semantics
for that column.
.. versionchanged:: 1.1 The autoincrement flag now defaults to
``"auto"`` which indicates autoincrement semantics by default
for single-column integer primary keys only; for composite
(multi-column) primary keys, autoincrement is never implicitly
enabled; as always, ``autoincrement=True`` will allow for
at most one of those columns to be an "autoincrement" column.
``autoincrement=True`` may also be set on a
:class:`_schema.Column`
that has an explicit client-side or server-side default,
subject to limitations of the backend database and dialect.
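For example, a sketch of marking one column of a composite primary key
as the autoincrement column (hypothetical table and column names)::

    Table(
        "note",
        metadata,
        Column("note_id", Integer, primary_key=True, autoincrement=True),
        Column("version_id", Integer, primary_key=True, autoincrement=False),
    )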
The setting *only* has an effect for columns which are:
* Integer derived (i.e. INT, SMALLINT, BIGINT).
* Part of the primary key
* Not referring to another column via :class:`_schema.ForeignKey`,
unless
the value is specified as ``'ignore_fk'``::
# turn on autoincrement for this column despite
# the ForeignKey()
Column('id', ForeignKey('other.id'),
primary_key=True, autoincrement='ignore_fk')
It is typically not desirable to have "autoincrement" enabled on a
column that refers to another via foreign key, as such a column is
required to refer to a value that originates from elsewhere.
The setting has these effects on columns that meet the
above criteria:
* DDL issued for the column, if the column does not already include
a default generating construct supported by the backend such as
:class:`.Identity`, will include database-specific
keywords intended to signify this column as an
"autoincrement" column for specific backends. Behavior for
primary SQLAlchemy dialects includes:
* ``AUTO_INCREMENT`` on MySQL and MariaDB
* SERIAL on PostgreSQL
* IDENTITY on MS-SQL - this occurs even without the
:class:`.Identity` construct as the
:paramref:`.Column.autoincrement` parameter pre-dates this
construct.
* SQLite - SQLite integer primary key columns are implicitly
"auto incrementing" and no additional keywords are rendered;
the special SQLite keyword ``AUTOINCREMENT`` is not rendered,
as it is unnecessary and not recommended by the database
vendor. See the section
:ref:`sqlite_autoincrement` for more background.
* Oracle - The Oracle dialect has no default "autoincrement"
feature available at this time, instead the :class:`.Identity`
construct is recommended to achieve this (the :class:`.Sequence`
construct may also be used).
* Third-party dialects - consult those dialects' documentation
for details on their specific behaviors.
* When a single-row :func:`_sql.insert` construct is compiled and
executed, which does not set the :meth:`_sql.Insert.inline`
modifier, newly generated primary key values for this column
will be automatically retrieved upon statement execution
using a method specific to the database driver in use:
* MySQL, SQLite - calling upon ``cursor.lastrowid``
(see
`https://www.python.org/dev/peps/pep-0249/#lastrowid
<https://www.python.org/dev/peps/pep-0249/#lastrowid>`_)
* PostgreSQL, SQL Server, Oracle - use RETURNING or an equivalent
construct when rendering an INSERT statement, and then retrieving
the newly generated primary key values after execution
* PostgreSQL, Oracle for :class:`_schema.Table` objects that
set :paramref:`_schema.Table.implicit_returning` to False -
for a :class:`.Sequence` only, the :class:`.Sequence` is invoked
explicitly before the INSERT statement takes place so that the
newly generated primary key value is available to the client
* SQL Server for :class:`_schema.Table` objects that
set :paramref:`_schema.Table.implicit_returning` to False -
the ``SELECT scope_identity()`` construct is used after the
INSERT statement is invoked to retrieve the newly generated
primary key value.
* Third-party dialects - consult those dialects' documentation
for details on their specific behaviors.
* For multiple-row :func:`_sql.insert` constructs invoked with
a list of parameters (i.e. "executemany" semantics), primary-key
retrieving behaviors are generally disabled, however there may
be special APIs that may be used to retrieve lists of new
primary key values for an "executemany", such as the psycopg2
"fast insertmany" feature. Such features are very new and
may not yet be well covered in documentation.
:param default: A scalar, Python callable, or
:class:`_expression.ColumnElement` expression representing the
*default value* for this column, which will be invoked upon insert
if this column is otherwise not specified in the VALUES clause of
the insert. This is a shortcut to using :class:`.ColumnDefault` as
a positional argument; see that class for full detail on the
structure of the argument.
Contrast this argument to
:paramref:`_schema.Column.server_default`
which creates a default generator on the database side.
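As an illustration, a few common forms (a sketch; the ``datetime``
import and hypothetical column names are assumed)::

    # scalar default
    Column("status", String(20), default="pending")

    # Python callable, invoked for each INSERT
    Column("created_at", DateTime, default=datetime.datetime.utcnow)

    # SQL expression default
    Column("expires_at", DateTime, default=func.now())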
.. seealso::
:ref:`metadata_defaults_toplevel`
:param doc: optional string that can be used by the ORM or similar
to document attributes on the Python side. This attribute does
**not** render SQL comments; use the
:paramref:`_schema.Column.comment`
parameter for this purpose.
:param key: An optional string identifier which will identify this
``Column`` object on the :class:`_schema.Table`.
When a key is provided,
this is the only identifier referencing the ``Column`` within the
application, including ORM attribute mapping; the ``name`` field
is used only when rendering SQL.
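For example, a sketch in which the Python-side key differs from the
SQL name (hypothetical names)::

    t = Table("users", metadata, Column("UserName", String(50), key="username"))

    t.c.username  # the Column is addressed by its key,
                  # not by its SQL name "UserName"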
:param index: When ``True``, indicates that a :class:`_schema.Index`
construct will be automatically generated for this
:class:`_schema.Column`, which will result in a "CREATE INDEX"
statement being emitted for the :class:`_schema.Table` when the DDL
create operation is invoked.
Using this flag is equivalent to making use of the
:class:`_schema.Index` construct explicitly at the level of the
:class:`_schema.Table` construct itself::
Table(
"some_table",
metadata,
Column("x", Integer),
Index("ix_some_table_x", "x")
)
To add the :paramref:`_schema.Index.unique` flag to the
:class:`_schema.Index`, set both the
:paramref:`_schema.Column.unique` and
:paramref:`_schema.Column.index` flags to True simultaneously,
which will have the effect of rendering the "CREATE UNIQUE INDEX"
DDL instruction instead of "CREATE INDEX".
The name of the index is generated using the
:ref:`default naming convention <constraint_default_naming_convention>`
which for the :class:`_schema.Index` construct is of the form
``ix_<tablename>_<columnname>``.
As this flag is intended only as a convenience for the common case
of adding a single-column, default configured index to a table
definition, explicit use of the :class:`_schema.Index` construct
should be preferred for most use cases, including composite indexes
that encompass more than one column, indexes with SQL expressions
or ordering, backend-specific index configuration options, and
indexes that use a specific name.
.. note:: the :attr:`_schema.Column.index` attribute on
:class:`_schema.Column`
**does not indicate** if this column is indexed or not, only
if this flag was explicitly set here. To view indexes on
a column, view the :attr:`_schema.Table.indexes` collection
or use :meth:`_reflection.Inspector.get_indexes`.
.. seealso::
:ref:`schema_indexes`
:ref:`constraint_naming_conventions`
:paramref:`_schema.Column.unique`
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param nullable: When set to ``False``, will cause the "NOT NULL"
phrase to be added when generating DDL for the column. When
``True``, will normally generate nothing (in SQL this defaults to
"NULL"), except in some very specific backend-specific edge cases
where "NULL" may render explicitly.
Defaults to ``True`` unless :paramref:`_schema.Column.primary_key`
is also ``True`` or the column specifies a :class:`_sql.Identity`,
in which case it defaults to ``False``.
This parameter is only used when issuing CREATE TABLE statements.
.. note::
When the column specifies a :class:`_sql.Identity`, this
parameter is in general ignored by the DDL compiler. The
PostgreSQL database allows a nullable identity column by
setting this parameter to ``True`` explicitly.
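For example, a sketch of an explicitly nullable identity column
(PostgreSQL only)::

    Column("counter", Integer, Identity(always=False), nullable=True)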
:param onupdate: A scalar, Python callable, or
:class:`~sqlalchemy.sql.expression.ClauseElement` representing a
default value to be applied to the column within UPDATE
statements, which will be invoked upon update if this column is not
present in the SET clause of the update. This is a shortcut to
using :class:`.ColumnDefault` as a positional argument with
``for_update=True``.
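For example, a sketch of a client-side "last modified" timestamp
(the ``datetime`` import is assumed)::

    Column(
        "updated_at",
        DateTime,
        default=datetime.datetime.utcnow,
        onupdate=datetime.datetime.utcnow,
    )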
.. seealso::
:ref:`metadata_defaults` - complete discussion of onupdate
:param primary_key: If ``True``, marks this column as a primary key
column. Multiple columns can have this flag set to specify
composite primary keys. As an alternative, the primary key of a
:class:`_schema.Table` can be specified via an explicit
:class:`.PrimaryKeyConstraint` object.
:param server_default: A :class:`.FetchedValue` instance, string,
or :func:`~sqlalchemy.sql.expression.text` construct representing
the DDL DEFAULT value for the column.
String types will be emitted as-is, surrounded by single quotes::
Column('x', Text, server_default="val")
x TEXT DEFAULT 'val'
A :func:`~sqlalchemy.sql.expression.text` expression will be
rendered as-is, without quotes::
Column('y', DateTime, server_default=text('NOW()'))
y DATETIME DEFAULT NOW()
Strings and text() will be converted into a
:class:`.DefaultClause` object upon initialization.
This parameter can also accept complex combinations of contextually
valid SQLAlchemy expressions or constructs::
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, MetaData, ARRAY, Text
from sqlalchemy.dialects.postgresql import array
engine = create_engine(
'postgresql+psycopg2://scott:tiger@localhost/mydatabase'
)
metadata_obj = MetaData()
tbl = Table(
"foo",
metadata_obj,
Column("bar",
ARRAY(Text),
server_default=array(["biz", "bang", "bash"])
)
)
metadata_obj.create_all(engine)
The above results in a table created with the following SQL::
CREATE TABLE foo (
bar TEXT[] DEFAULT ARRAY['biz', 'bang', 'bash']
)
Use :class:`.FetchedValue` to indicate that an already-existing
column will generate a default value on the database side which
will be available to SQLAlchemy for post-fetch after inserts. This
construct does not specify any DDL and the implementation is left
to the database, such as via a trigger.
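For example, a sketch marking a column whose value is produced by a
database trigger::

    from sqlalchemy.schema import FetchedValue

    Column("audit_token", String(40), server_default=FetchedValue())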
.. seealso::
:ref:`server_defaults` - complete discussion of server side
defaults
:param server_onupdate: A :class:`.FetchedValue` instance
representing a database-side default generation function,
such as a trigger. This
indicates to SQLAlchemy that a newly generated value will be
available after updates. This construct does not actually
implement any kind of generation function within the database,
which instead must be specified separately.
.. warning:: This directive **does not** currently produce MySQL's
"ON UPDATE CURRENT_TIMESTAMP()" clause. See
:ref:`mysql_timestamp_onupdate` for background on how to
produce this clause.
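For example, a sketch for a column maintained by a database-side
trigger upon UPDATE (``FetchedValue`` import as above)::

    Column("last_changed", DateTime, server_onupdate=FetchedValue())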
.. seealso::
:ref:`triggered_columns`
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param unique: When ``True``, and the :paramref:`_schema.Column.index`
parameter is left at its default value of ``False``,
indicates that a :class:`_schema.UniqueConstraint`
construct will be automatically generated for this
:class:`_schema.Column`,
which will result in a "UNIQUE CONSTRAINT" clause referring
to this column being included
in the ``CREATE TABLE`` statement emitted, when the DDL create
operation for the :class:`_schema.Table` object is invoked.
When this flag is ``True`` while the
:paramref:`_schema.Column.index` parameter is simultaneously
set to ``True``, the effect instead is that a
:class:`_schema.Index` construct which includes the
:paramref:`_schema.Index.unique` parameter set to ``True``
is generated. See the documentation for
:paramref:`_schema.Column.index` for additional detail.
Using this flag is equivalent to making use of the
:class:`_schema.UniqueConstraint` construct explicitly at the
level of the :class:`_schema.Table` construct itself::
Table(
"some_table",
metadata,
Column("x", Integer),
UniqueConstraint("x")
)
The :paramref:`_schema.UniqueConstraint.name` parameter
of the unique constraint object is left at its default value
of ``None``; in the absence of a :ref:`naming convention <constraint_naming_conventions>`
for the enclosing :class:`_schema.MetaData`, the UNIQUE CONSTRAINT
construct will be emitted as unnamed, which typically invokes
a database-specific naming convention to take place.
As this flag is intended only as a convenience for the common case
of adding a single-column, default configured unique constraint to a table
definition, explicit use of the :class:`_schema.UniqueConstraint` construct
should be preferred for most use cases, including composite constraints
that encompass more than one column, backend-specific index configuration options, and
constraints that use a specific name.
.. note:: the :attr:`_schema.Column.unique` attribute on
:class:`_schema.Column`
**does not indicate** if this column has a unique constraint or
not, only if this flag was explicitly set here. To view
indexes and unique constraints that may involve this column,
view the
:attr:`_schema.Table.indexes` and/or
:attr:`_schema.Table.constraints` collections or use
:meth:`_reflection.Inspector.get_indexes` and/or
:meth:`_reflection.Inspector.get_unique_constraints`
.. seealso::
:ref:`schema_unique_constraint`
:ref:`constraint_naming_conventions`
:paramref:`_schema.Column.index`
:param system: When ``True``, indicates this is a "system" column,
that is a column which is automatically made available by the
database, and should not be included in the columns list for a
``CREATE TABLE`` statement.
For more elaborate scenarios where columns should be
conditionally rendered differently on different backends,
consider custom compilation rules for :class:`.CreateColumn`.
:param comment: Optional string that will render an SQL comment on
table creation.
.. versionadded:: 1.2 Added the
:paramref:`_schema.Column.comment`
parameter to :class:`_schema.Column`.
""" # noqa: E501, RST201, RST202
l_args = [__name_pos, __type_pos] + list(args)
del args
if l_args:
if isinstance(l_args[0], str):
if name is not None:
raise exc.ArgumentError(
"May not pass name positionally and as a keyword."
)
name = l_args.pop(0) # type: ignore
elif l_args[0] is None:
l_args.pop(0)
if l_args:
coltype = l_args[0]
if hasattr(coltype, "_sqla_type"):
if type_ is not None:
raise exc.ArgumentError(
"May not pass type_ positionally and as a keyword."
)
type_ = l_args.pop(0) # type: ignore
elif l_args[0] is None:
l_args.pop(0)
if name is not None:
name = quoted_name(name, quote)
elif quote is not None:
raise exc.ArgumentError(
"Explicit 'name' is required when " "sending 'quote' argument"
)
# name = None is expected to be an interim state
# note this use case is legacy now that ORM declarative has a
# dedicated "column" construct local to the ORM
super().__init__(name, type_) # type: ignore
self.key = key if key is not None else name # type: ignore
self.primary_key = primary_key
self._user_defined_nullable = udn = nullable
if udn is not NULL_UNSPECIFIED:
self.nullable = udn
else:
self.nullable = not primary_key
# these default to None because .index and .unique are *not*
# informational flags about Column - there can still be an
# Index or UniqueConstraint referring to this Column.
self.index = index
self.unique = unique
self.system = system
self.doc = doc
self.autoincrement = autoincrement
self.constraints = set()
self.foreign_keys = set()
self.comment = comment
self.computed = None
self.identity = None
# check if this Column is proxying another column
if _proxies is not None:
self._proxies = _proxies
else:
# otherwise, add DDL-related events
if isinstance(self.type, SchemaEventTarget):
self.type._set_parent_with_dispatch(self)
for impl in self.type._variant_mapping.values():
if isinstance(impl, SchemaEventTarget):
impl._set_parent_with_dispatch(self)
if default is not None:
if not isinstance(default, (ColumnDefault, Sequence)):
default = ColumnDefault(default)
self.default = default
l_args.append(default)
else:
self.default = None
if onupdate is not None:
if not isinstance(onupdate, (ColumnDefault, Sequence)):
onupdate = ColumnDefault(onupdate, for_update=True)
self.onupdate = onupdate
l_args.append(onupdate)
else:
self.onupdate = None
self.server_default = server_default
self.server_onupdate = server_onupdate
if self.server_default is not None:
if isinstance(self.server_default, FetchedValue):
l_args.append(self.server_default._as_for_update(False))
else:
l_args.append(DefaultClause(self.server_default))
if self.server_onupdate is not None:
if isinstance(self.server_onupdate, FetchedValue):
l_args.append(self.server_onupdate._as_for_update(True))
else:
l_args.append(
DefaultClause(self.server_onupdate, for_update=True)
)
self._init_items(*cast(_typing_Sequence[SchemaItem], l_args))
util.set_creation_order(self)
if info is not None:
self.info = info
self._extra_kwargs(**dialect_kwargs)
table: Table
constraints: Set[Constraint]
foreign_keys: Set[ForeignKey]
"""A collection of all :class:`_schema.ForeignKey` marker objects
associated with this :class:`_schema.Column`.
Each object is a member of a :class:`_schema.Table`-wide
:class:`_schema.ForeignKeyConstraint`.
.. seealso::
:attr:`_schema.Table.foreign_keys`
"""
index: Optional[bool]
"""The value of the :paramref:`_schema.Column.index` parameter.
Does not indicate if this :class:`_schema.Column` is actually indexed
or not; use :attr:`_schema.Table.indexes`.
.. seealso::
:attr:`_schema.Table.indexes`
"""
unique: Optional[bool]
"""The value of the :paramref:`_schema.Column.unique` parameter.
Does not indicate if this :class:`_schema.Column` is actually subject to
a unique constraint or not; use :attr:`_schema.Table.indexes` and
:attr:`_schema.Table.constraints`.
.. seealso::
:attr:`_schema.Table.indexes`
:attr:`_schema.Table.constraints`.
"""
computed: Optional[Computed]
identity: Optional[Identity]
@util.memoized_property
def _gen_static_annotations_cache_key(self) -> bool: # type: ignore
"""special attribute used by cache key gen, if true, we will
use a static cache key for the annotations dictionary, else we
will generate a new cache key for annotations each time.
Added for #8790
"""
return self.table is not None and self.table._is_table
def _extra_kwargs(self, **kwargs: Any) -> None:
self._validate_dialect_kwargs(kwargs)
def __str__(self) -> str:
if self.name is None:
return "(no name)"
elif self.table is not None:
if self.table.named_with_column:
return self.table.description + "." + self.description
else:
return self.description
else:
return self.description
def references(self, column: Column[Any]) -> bool:
"""Return True if this Column references the given column via foreign
key."""
for fk in self.foreign_keys:
if fk.column.proxy_set.intersection(column.proxy_set):
return True
else:
return False
def append_foreign_key(self, fk: ForeignKey) -> None:
fk._set_parent_with_dispatch(self)
def __repr__(self) -> str:
kwarg = []
if self.key != self.name:
kwarg.append("key")
if self.primary_key:
kwarg.append("primary_key")
if not self.nullable:
kwarg.append("nullable")
if self.onupdate:
kwarg.append("onupdate")
if self.default:
kwarg.append("default")
if self.server_default:
kwarg.append("server_default")
if self.comment:
kwarg.append("comment")
return "Column(%s)" % ", ".join(
[repr(self.name)]
+ [repr(self.type)]
+ [repr(x) for x in self.foreign_keys if x is not None]
+ [repr(x) for x in self.constraints]
+ [
(
self.table is not None
and "table=<%s>" % self.table.description
or "table=None"
)
]
+ ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg]
)
def _set_parent(
self,
parent: SchemaEventTarget,
allow_replacements: bool = True,
**kw: Any,
) -> None:
table = parent
assert isinstance(table, Table)
if not self.name:
raise exc.ArgumentError(
"Column must be constructed with a non-blank name or "
"assign a non-blank .name before adding to a Table."
)
self._reset_memoizations()
if self.key is None:
self.key = self.name
existing = getattr(self, "table", None)
if existing is not None and existing is not table:
raise exc.ArgumentError(
"Column object '%s' already assigned to Table '%s'"
% (self.key, existing.description)
)
if self.key in table._columns:
col = table._columns[self.key]
if col is not self:
if not allow_replacements:
util.warn_deprecated(
"A column with name '%s' is already present "
"in table '%s'. Please use method "
":meth:`_schema.Table.append_column` with the "
"parameter ``replace_existing=True`` to replace an "
"existing column." % (self.key, table.name),
"1.4",
)
for fk in col.foreign_keys:
table.foreign_keys.remove(fk)
if fk.constraint in table.constraints:
# this might have been removed
# already, if it's a composite constraint
# and more than one col being replaced
table.constraints.remove(fk.constraint)
table._columns.replace(self)
self.table = table
if self.primary_key:
table.primary_key._replace(self)
elif self.key in table.primary_key:
raise exc.ArgumentError(
"Trying to redefine primary-key column '%s' as a "
"non-primary-key column on table '%s'"
% (self.key, table.fullname)
)
if self.index:
if isinstance(self.index, str):
raise exc.ArgumentError(
"The 'index' keyword argument on Column is boolean only. "
"To create indexes with a specific name, create an "
"explicit Index object external to the Table."
)
table.append_constraint(
Index(
None, self.key, unique=bool(self.unique), _column_flag=True
)
)
elif self.unique:
if isinstance(self.unique, str):
raise exc.ArgumentError(
"The 'unique' keyword argument on Column is boolean "
"only. To create unique constraints or indexes with a "
"specific name, append an explicit UniqueConstraint to "
"the Table's list of elements, or create an explicit "
"Index object external to the Table."
)
table.append_constraint(
UniqueConstraint(self.key, _column_flag=True)
)
self._setup_on_memoized_fks(lambda fk: fk._set_remote_table(table))
if self.identity and (
isinstance(self.default, Sequence)
or isinstance(self.onupdate, Sequence)
):
raise exc.ArgumentError(
"An column cannot specify both Identity and Sequence."
)
def _setup_on_memoized_fks(self, fn: Callable[..., Any]) -> None:
fk_keys = [
((self.table.key, self.key), False),
((self.table.key, self.name), True),
]
for fk_key, link_to_name in fk_keys:
if fk_key in self.table.metadata._fk_memos:
for fk in self.table.metadata._fk_memos[fk_key]:
if fk.link_to_name is link_to_name:
fn(fk)
def _on_table_attach(self, fn: Callable[..., Any]) -> None:
if self.table is not None:
fn(self, self.table)
else:
event.listen(self, "after_parent_attach", fn)
@util.deprecated(
"1.4",
"The :meth:`_schema.Column.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, **kw: Any) -> Column[Any]:
return self._copy(**kw)
def _copy(self, **kw: Any) -> Column[Any]:
"""Create a copy of this ``Column``, uninitialized.
This is used in :meth:`_schema.Table.to_metadata`.
"""
# Constraint objects plus non-constraint-bound ForeignKey objects
args: List[SchemaItem] = [
c._copy(**kw)
for c in self.constraints
if not c._type_bound # type: ignore
] + [
c._copy(**kw) # type: ignore
for c in self.foreign_keys
if not c.constraint
]
# ticket #5276
column_kwargs = {}
for dialect_name in self.dialect_options:
dialect_options = self.dialect_options[dialect_name]._non_defaults
for (
dialect_option_key,
dialect_option_value,
) in dialect_options.items():
column_kwargs[
dialect_name + "_" + dialect_option_key
] = dialect_option_value
server_default = self.server_default
server_onupdate = self.server_onupdate
if isinstance(server_default, (Computed, Identity)):
# TODO: likely should be copied in all cases
args.append(server_default._copy(**kw))
server_default = server_onupdate = None
type_ = self.type
if isinstance(type_, SchemaEventTarget):
type_ = type_.copy(**kw)
# TODO: DefaultGenerator is not copied here! it's just used again
# with _set_parent() pointing to the old column. see the new
# use of _copy() in the new _merge() method
c = self._constructor(
name=self.name,
type_=type_,
key=self.key,
primary_key=self.primary_key,
unique=self.unique,
system=self.system,
# quote=self.quote, # disabled 2013-08-27 (commit 031ef080)
index=self.index,
autoincrement=self.autoincrement,
default=self.default,
server_default=server_default,
onupdate=self.onupdate,
server_onupdate=server_onupdate,
doc=self.doc,
comment=self.comment,
*args,
**column_kwargs,
)
# copy the state of "nullable" exactly, to accommodate for
# ORM flipping the .nullable flag directly
c.nullable = self.nullable
c._user_defined_nullable = self._user_defined_nullable
return self._schema_item_copy(c)
def _merge(self, other: Column[Any]) -> None:
"""merge the elements of another column into this one.
this is used by ORM pep-593 merge and will likely need a lot
of fixes.
"""
if self.primary_key:
other.primary_key = True
type_ = self.type
if not type_._isnull and other.type._isnull:
if isinstance(type_, SchemaEventTarget):
type_ = type_.copy()
other.type = type_
if isinstance(type_, SchemaEventTarget):
type_._set_parent_with_dispatch(other)
for impl in type_._variant_mapping.values():
if isinstance(impl, SchemaEventTarget):
impl._set_parent_with_dispatch(other)
if (
self._user_defined_nullable is not NULL_UNSPECIFIED
and other._user_defined_nullable is NULL_UNSPECIFIED
):
other.nullable = self.nullable
other._user_defined_nullable = self._user_defined_nullable
if self.default is not None and other.default is None:
new_default = self.default._copy()
new_default._set_parent(other)
if self.server_default and other.server_default is None:
new_server_default = self.server_default
if isinstance(new_server_default, FetchedValue):
new_server_default = new_server_default._copy()
new_server_default._set_parent(other)
else:
other.server_default = new_server_default
if self.server_onupdate and other.server_onupdate is None:
new_server_onupdate = self.server_onupdate
new_server_onupdate = new_server_onupdate._copy()
new_server_onupdate._set_parent(other)
if self.onupdate and other.onupdate is None:
new_onupdate = self.onupdate._copy()
new_onupdate._set_parent(other)
if self.index and not other.index:
other.index = True
if self.unique and not other.unique:
other.unique = True
for const in self.constraints:
if not const._type_bound:
new_const = const._copy()
new_const._set_parent(other)
for fk in self.foreign_keys:
if not fk.constraint:
new_fk = fk._copy()
new_fk._set_parent(other)
def _make_proxy(
self,
selectable: FromClause,
name: Optional[str] = None,
key: Optional[str] = None,
name_is_truncatable: bool = False,
compound_select_cols: Optional[
_typing_Sequence[ColumnElement[Any]]
] = None,
**kw: Any,
) -> Tuple[str, ColumnClause[_T]]:
"""Create a *proxy* for this column.
This is a copy of this ``Column`` referenced by a different parent
(such as an alias or select statement). The column should
be used only in select scenarios, as its full DDL/default
information is not transferred.
"""
fk = [
ForeignKey(
col if col is not None else f._colspec,
_unresolvable=col is None,
_constraint=f.constraint,
)
for f, col in [
(fk, fk._resolve_column(raiseerr=False))
for fk in self.foreign_keys
]
]
if name is None and self.name is None:
raise exc.InvalidRequestError(
"Cannot initialize a sub-selectable"
" with this Column object until its 'name' has "
"been assigned."
)
try:
c = self._constructor(
coercions.expect(
roles.TruncatedLabelRole, name if name else self.name
)
if name_is_truncatable
else (name or self.name),
self.type,
# this may actually be ._proxy_key when the key is incoming
key=key if key else name if name else self.key,
primary_key=self.primary_key,
nullable=self.nullable,
_proxies=list(compound_select_cols)
if compound_select_cols
else [self],
*fk,
)
except TypeError as err:
raise TypeError(
"Could not create a copy of this %r object. "
"Ensure the class includes a _constructor() "
"attribute or method which accepts the "
"standard Column constructor arguments, or "
"references the Column class itself." % self.__class__
) from err
c.table = selectable
c._propagate_attrs = selectable._propagate_attrs
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns.get(c.key)
if self.primary_key:
selectable.primary_key.add(c) # type: ignore
if fk:
selectable.foreign_keys.update(fk) # type: ignore
return c.key, c
class ForeignKey(DialectKWArgs, SchemaItem):
"""Defines a dependency between two columns.
``ForeignKey`` is specified as an argument to a :class:`_schema.Column`
object,
e.g.::
t = Table("remote_table", metadata,
Column("remote_id", ForeignKey("main_table.id"))
)
Note that ``ForeignKey`` is only a marker object that defines
a dependency between two columns. The actual constraint
is in all cases represented by the :class:`_schema.ForeignKeyConstraint`
object. This object will be generated automatically when
a ``ForeignKey`` is associated with a :class:`_schema.Column` which
in turn is associated with a :class:`_schema.Table`. Conversely,
when :class:`_schema.ForeignKeyConstraint` is applied to a
:class:`_schema.Table`,
``ForeignKey`` markers are automatically generated to be
present on each associated :class:`_schema.Column`, which are also
associated with the constraint object.
Note that you cannot define a "composite" foreign key constraint,
that is a constraint between a grouping of multiple parent/child
columns, using ``ForeignKey`` objects. To define this grouping,
the :class:`_schema.ForeignKeyConstraint` object must be used, and applied
to the :class:`_schema.Table`. The associated ``ForeignKey`` objects
are created automatically.
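For example, a sketch of a composite foreign key (hypothetical table
and column names)::

    Table(
        "invoice_item",
        metadata,
        Column("item_id", Integer, primary_key=True),
        Column("invoice_id", Integer),
        Column("ref_num", Integer),
        ForeignKeyConstraint(
            ["invoice_id", "ref_num"],
            ["invoice.invoice_id", "invoice.ref_num"],
        ),
    )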
The ``ForeignKey`` objects associated with an individual
:class:`_schema.Column`
object are available in the ``foreign_keys`` collection
of that column.
Further examples of foreign key configuration are in
:ref:`metadata_foreignkeys`.
"""
__visit_name__ = "foreign_key"
parent: Column[Any]
_table_column: Optional[Column[Any]]
def __init__(
self,
column: _DDLColumnArgument,
_constraint: Optional[ForeignKeyConstraint] = None,
use_alter: bool = False,
name: Optional[str] = None,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
link_to_name: bool = False,
match: Optional[str] = None,
info: Optional[_InfoType] = None,
comment: Optional[str] = None,
_unresolvable: bool = False,
**dialect_kw: Any,
):
r"""
Construct a column-level FOREIGN KEY.
The :class:`_schema.ForeignKey` object when constructed generates a
:class:`_schema.ForeignKeyConstraint`
which is associated with the parent
:class:`_schema.Table` object's collection of constraints.
:param column: A single target column for the key relationship. A
:class:`_schema.Column` object or a column name as a string:
``tablename.columnkey`` or ``schema.tablename.columnkey``.
``columnkey`` is the ``key`` which has been assigned to the column
(defaults to the column name itself), unless ``link_to_name`` is
``True`` in which case the rendered name of the column is used.
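A few equivalent sketches of the accepted forms (hypothetical
names)::

    ForeignKey("users.id")            # tablename.columnkey
    ForeignKey("myschema.users.id")   # schema.tablename.columnkey
    ForeignKey(user_table.c.id)       # a Column object directly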
:param name: Optional string. An in-database name for the key if
`constraint` is not provided.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally
assigned ``key``.
:param use_alter: passed to the underlying
:class:`_schema.ForeignKeyConstraint`
to indicate the constraint should
be generated/dropped externally from the CREATE TABLE/ DROP TABLE
statement. See :paramref:`_schema.ForeignKeyConstraint.use_alter`
for further description.
.. seealso::
:paramref:`_schema.ForeignKeyConstraint.use_alter`
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param comment: Optional string that will render an SQL comment on
foreign key constraint creation.
.. versionadded:: 2.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. The
arguments are ultimately handled by a corresponding
:class:`_schema.ForeignKeyConstraint`.
See the documentation regarding
an individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. versionadded:: 0.9.2
"""
self._colspec = coercions.expect(roles.DDLReferredColumnRole, column)
self._unresolvable = _unresolvable
if isinstance(self._colspec, str):
self._table_column = None
else:
self._table_column = self._colspec
if not isinstance(
self._table_column.table, (type(None), TableClause)
):
raise exc.ArgumentError(
"ForeignKey received Column not bound "
"to a Table, got: %r" % self._table_column.table
)
# the linked ForeignKeyConstraint.
# ForeignKey will create this when parent Column
# is attached to a Table, *or* ForeignKeyConstraint
# object passes itself in when creating ForeignKey
# markers.
self.constraint = _constraint
# .parent is not Optional under normal use
self.parent = None # type: ignore
self.use_alter = use_alter
self.name = name
self.onupdate = onupdate
self.ondelete = ondelete
self.deferrable = deferrable
self.initially = initially
self.link_to_name = link_to_name
self.match = match
self.comment = comment
if info:
self.info = info
self._unvalidated_dialect_kw = dialect_kw
def __repr__(self) -> str:
return "ForeignKey(%r)" % self._get_colspec()
@util.deprecated(
"1.4",
"The :meth:`_schema.ForeignKey.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, *, schema: Optional[str] = None, **kw: Any) -> ForeignKey:
return self._copy(schema=schema, **kw)
def _copy(self, *, schema: Optional[str] = None, **kw: Any) -> ForeignKey:
"""Produce a copy of this :class:`_schema.ForeignKey` object.
The new :class:`_schema.ForeignKey` will not be bound
to any :class:`_schema.Column`.
This method is usually used by the internal
copy procedures of :class:`_schema.Column`, :class:`_schema.Table`,
and :class:`_schema.MetaData`.
:param schema: The returned :class:`_schema.ForeignKey` will
reference the original table and column name, qualified
by the given string schema name.
"""
fk = ForeignKey(
self._get_colspec(schema=schema),
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
comment=self.comment,
**self._unvalidated_dialect_kw,
)
return self._schema_item_copy(fk)
def _get_colspec(
self,
schema: Optional[
Union[
str,
Literal[SchemaConst.RETAIN_SCHEMA, SchemaConst.BLANK_SCHEMA],
]
] = None,
table_name: Optional[str] = None,
_is_copy: bool = False,
) -> str:
"""Return a string based 'column specification' for this
:class:`_schema.ForeignKey`.
This is usually the equivalent of the string-based "tablename.colname"
argument first passed to the object's constructor.
"""
if schema not in (None, RETAIN_SCHEMA):
_schema, tname, colname = self._column_tokens
if table_name is not None:
tname = table_name
if schema is BLANK_SCHEMA:
return "%s.%s" % (tname, colname)
else:
return "%s.%s.%s" % (schema, tname, colname)
elif table_name:
schema, tname, colname = self._column_tokens
if schema:
return "%s.%s.%s" % (schema, table_name, colname)
else:
return "%s.%s" % (table_name, colname)
elif self._table_column is not None:
if self._table_column.table is None:
if _is_copy:
raise exc.InvalidRequestError(
f"Can't copy ForeignKey object which refers to "
f"non-table bound Column {self._table_column!r}"
)
else:
return self._table_column.key
return "%s.%s" % (
self._table_column.table.fullname,
self._table_column.key,
)
else:
assert isinstance(self._colspec, str)
return self._colspec
@property
def _referred_schema(self) -> Optional[str]:
return self._column_tokens[0]
def _table_key(self) -> Any:
if self._table_column is not None:
if self._table_column.table is None:
return None
else:
return self._table_column.table.key
else:
schema, tname, colname = self._column_tokens
return _get_table_key(tname, schema)
target_fullname = property(_get_colspec)
def references(self, table: Table) -> bool:
"""Return True if the given :class:`_schema.Table`
is referenced by this
:class:`_schema.ForeignKey`."""
return table.corresponding_column(self.column) is not None
def get_referent(self, table: FromClause) -> Optional[Column[Any]]:
"""Return the :class:`_schema.Column` in the given
:class:`_schema.Table` (or any :class:`.FromClause`)
referenced by this :class:`_schema.ForeignKey`.
Returns None if this :class:`_schema.ForeignKey`
does not reference the given
:class:`_schema.Table`.
"""
# our column is a Column, and any subquery etc. proxying us
# would be doing so via another Column, so that's what would
# be returned here
return table.columns.corresponding_column(self.column) # type: ignore
@util.memoized_property
def _column_tokens(self) -> Tuple[Optional[str], str, Optional[str]]:
"""parse a string-based _colspec into its component parts."""
m = self._get_colspec().split(".")
if m is None:
raise exc.ArgumentError(
"Invalid foreign key column specification: %s" % self._colspec
)
if len(m) == 1:
tname = m.pop()
colname = None
else:
colname = m.pop()
tname = m.pop()
# A FK between column 'bar' and table 'foo' can be
# specified as 'foo', 'foo.bar', 'dbo.foo.bar',
# 'otherdb.dbo.foo.bar'. Once we have the column name and
# the table name, treat everything else as the schema
# name. Some databases (e.g. Sybase) support
# inter-database foreign keys. See ticket #1341 and --
# indirectly related -- ticket #594. This assumes that '.'
# will never appear *within* any component of the FK.
if len(m) > 0:
schema = ".".join(m)
else:
schema = None
return schema, tname, colname
def _resolve_col_tokens(self) -> Tuple[Table, str, Optional[str]]:
if self.parent is None:
raise exc.InvalidRequestError(
"this ForeignKey object does not yet have a "
"parent Column associated with it."
)
elif self.parent.table is None:
raise exc.InvalidRequestError(
"this ForeignKey's parent column is not yet associated "
"with a Table."
)
parenttable = self.parent.table
if self._unresolvable:
schema, tname, colname = self._column_tokens
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
# assertion
# basically Column._make_proxy() sends the actual
# target Column to the ForeignKey object, so the
# string resolution here is never called.
for c in self.parent.base_columns:
if isinstance(c, Column):
assert c.table is parenttable
break
else:
assert False
######################
schema, tname, colname = self._column_tokens
if schema is None and parenttable.metadata.schema is not None:
schema = parenttable.metadata.schema
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
def _link_to_col_by_colstring(
self, parenttable: Table, table: Table, colname: Optional[str]
) -> Column[Any]:
_column = None
if colname is None:
# colname is None in the case that ForeignKey argument
# was specified as table name only, in which case we
# match the column name to the same column on the
# parent.
# this use case wasn't working in later 1.x series
# as it had no test coverage; fixed in 2.0
parent = self.parent
assert parent is not None
key = parent.key
_column = table.c.get(key, None)
elif self.link_to_name:
key = colname
for c in table.c:
if c.name == colname:
_column = c
else:
key = colname
_column = table.c.get(colname, None)
if _column is None:
raise exc.NoReferencedColumnError(
"Could not initialize target column "
"for ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'"
% (self._colspec, parenttable.name, table.name, key),
table.name,
key,
)
return _column
def _set_target_column(self, column: Column[Any]) -> None:
assert self.parent is not None
# propagate TypeEngine to parent if it didn't have one
if self.parent.type._isnull:
self.parent.type = column.type
# super-edgy case, if other FKs point to our column,
# they'd get the type propagated out also.
def set_type(fk: ForeignKey) -> None:
if fk.parent.type._isnull:
fk.parent.type = column.type
self.parent._setup_on_memoized_fks(set_type)
self.column = column # type: ignore
@util.ro_memoized_property
def column(self) -> Column[Any]:
"""Return the target :class:`_schema.Column` referenced by this
:class:`_schema.ForeignKey`.
If no target column has been established, an exception
is raised.
"""
return self._resolve_column()
@overload
def _resolve_column(self, *, raiseerr: Literal[True] = ...) -> Column[Any]:
...
@overload
def _resolve_column(
self, *, raiseerr: bool = ...
) -> Optional[Column[Any]]:
...
def _resolve_column(
self, *, raiseerr: bool = True
) -> Optional[Column[Any]]:
_column: Column[Any]
if isinstance(self._colspec, str):
parenttable, tablekey, colname = self._resolve_col_tokens()
if self._unresolvable or tablekey not in parenttable.metadata:
if not raiseerr:
return None
raise exc.NoReferencedTableError(
"Foreign key associated with column '%s' could not find "
"table '%s' with which to generate a "
"foreign key to target column '%s'"
% (self.parent, tablekey, colname),
tablekey,
)
elif parenttable.key not in parenttable.metadata:
if not raiseerr:
return None
raise exc.InvalidRequestError(
"Table %s is no longer associated with its "
"parent MetaData" % parenttable
)
else:
table = parenttable.metadata.tables[tablekey]
return self._link_to_col_by_colstring(
parenttable, table, colname
)
elif hasattr(self._colspec, "__clause_element__"):
_column = self._colspec.__clause_element__()
return _column
else:
_column = self._colspec
return _column
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, Column)
if self.parent is not None and self.parent is not parent:
raise exc.InvalidRequestError(
"This ForeignKey already has a parent !"
)
self.parent = parent
self.parent.foreign_keys.add(self)
self.parent._on_table_attach(self._set_table)
def _set_remote_table(self, table: Table) -> None:
parenttable, _, colname = self._resolve_col_tokens()
_column = self._link_to_col_by_colstring(parenttable, table, colname)
self._set_target_column(_column)
assert self.constraint is not None
self.constraint._validate_dest_table(table)
def _remove_from_metadata(self, metadata: MetaData) -> None:
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if self in metadata._fk_memos[fk_key]:
# TODO: no test coverage for self not in memos
metadata._fk_memos[fk_key].remove(self)
def _set_table(self, column: Column[Any], table: Table) -> None:
# standalone ForeignKey - create ForeignKeyConstraint
# on the hosting Table when attached to the Table.
assert isinstance(table, Table)
if self.constraint is None:
self.constraint = ForeignKeyConstraint(
[],
[],
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
match=self.match,
comment=self.comment,
**self._unvalidated_dialect_kw,
)
self.constraint._append_element(column, self)
self.constraint._set_parent_with_dispatch(table)
table.foreign_keys.add(self)
# set up remote ".column" attribute, or a note to pick it
# up when the other Table/Column shows up
if isinstance(self._colspec, str):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if table_key in parenttable.metadata.tables:
table = parenttable.metadata.tables[table_key]
try:
_column = self._link_to_col_by_colstring(
parenttable, table, colname
)
except exc.NoReferencedColumnError:
# this is OK, we'll try later
pass
else:
self._set_target_column(_column)
parenttable.metadata._fk_memos[fk_key].append(self)
elif hasattr(self._colspec, "__clause_element__"):
_column = self._colspec.__clause_element__()
self._set_target_column(_column)
else:
_column = self._colspec
self._set_target_column(_column)
if TYPE_CHECKING:
def default_is_sequence(
obj: Optional[DefaultGenerator],
) -> TypeGuard[Sequence]:
...
def default_is_clause_element(
obj: Optional[DefaultGenerator],
) -> TypeGuard[ColumnElementColumnDefault]:
...
def default_is_scalar(
obj: Optional[DefaultGenerator],
) -> TypeGuard[ScalarElementColumnDefault]:
...
else:
default_is_sequence = operator.attrgetter("is_sequence")
default_is_clause_element = operator.attrgetter("is_clause_element")
default_is_scalar = operator.attrgetter("is_scalar")
class DefaultGenerator(Executable, SchemaItem):
"""Base class for column *default* values."""
__visit_name__ = "default_generator"
_is_default_generator = True
is_sequence = False
is_server_default = False
is_clause_element = False
is_callable = False
is_scalar = False
column: Optional[Column[Any]]
def __init__(self, for_update: bool = False) -> None:
self.for_update = for_update
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
if TYPE_CHECKING:
assert isinstance(parent, Column)
self.column = parent
if self.for_update:
self.column.onupdate = self
else:
self.column.default = self
def _copy(self) -> DefaultGenerator:
raise NotImplementedError()
def _execute_on_connection(
self,
connection: Connection,
distilled_params: _CoreMultiExecuteParams,
execution_options: CoreExecuteOptionsParameter,
) -> Any:
util.warn_deprecated(
"Using the .execute() method to invoke a "
"DefaultGenerator object is deprecated; please use "
"the .scalar() method.",
"2.0",
)
return self._execute_on_scalar(
connection, distilled_params, execution_options
)
def _execute_on_scalar(
self,
connection: Connection,
distilled_params: _CoreMultiExecuteParams,
execution_options: CoreExecuteOptionsParameter,
) -> Any:
return connection._execute_default(
self, distilled_params, execution_options
)
class ColumnDefault(DefaultGenerator, ABC):
"""A plain default value on a column.
This could correspond to a constant, a callable function,
or a SQL clause.
:class:`.ColumnDefault` is generated automatically
whenever the ``default``, ``onupdate`` arguments of
:class:`_schema.Column` are used. A :class:`.ColumnDefault`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, default=50)
Is equivalent to::
Column('foo', Integer, ColumnDefault(50))
"""
arg: Any
@overload
def __new__(
cls, arg: Callable[..., Any], for_update: bool = ...
) -> CallableColumnDefault:
...
@overload
def __new__(
cls, arg: ColumnElement[Any], for_update: bool = ...
) -> ColumnElementColumnDefault:
...
# if I return ScalarElementColumnDefault here, which is what's actually
# returned, mypy complains that
# overloads overlap w/ incompatible return types.
@overload
def __new__(cls, arg: object, for_update: bool = ...) -> ColumnDefault:
...
def __new__(
cls, arg: Any = None, for_update: bool = False
) -> ColumnDefault:
"""Construct a new :class:`.ColumnDefault`.
:param arg: argument representing the default value.
May be one of the following:
* a plain non-callable Python value, such as a
string, integer, boolean, or other simple type.
The default value will be used as is each time.
* a SQL expression, that is one which derives from
:class:`_expression.ColumnElement`. The SQL expression will
be rendered into the INSERT or UPDATE statement,
or in the case of a primary key column when
RETURNING is not used may be
pre-executed before an INSERT within a SELECT.
* A Python callable. The function will be invoked for each
new row subject to an INSERT or UPDATE.
The callable must accept exactly
zero or one positional arguments. The one-argument form
will receive an instance of the :class:`.ExecutionContext`,
which provides contextual information as to the current
:class:`_engine.Connection` in use as well as the current
statement and parameters.
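For example, a minimal sketch of both callable forms (the
context-sensitive form uses
:meth:`.DefaultExecutionContext.get_current_parameters`)::

    # zero-argument form
    Column("created", DateTime, default=datetime.datetime.utcnow)

    # one-argument form, receiving the ExecutionContext
    def plus_twelve(context):
        return context.get_current_parameters()["counter"] + 12

    Column("counter_plus_twelve", Integer, default=plus_twelve)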
"""
if isinstance(arg, FetchedValue):
raise exc.ArgumentError(
"ColumnDefault may not be a server-side default type."
)
elif callable(arg):
cls = CallableColumnDefault
elif isinstance(arg, ClauseElement):
cls = ColumnElementColumnDefault
elif arg is not None:
cls = ScalarElementColumnDefault
return object.__new__(cls)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.arg!r})"
class ScalarElementColumnDefault(ColumnDefault):
"""default generator for a fixed scalar Python value
.. versionadded:: 2.0
"""
is_scalar = True
def __init__(self, arg: Any, for_update: bool = False) -> None:
self.for_update = for_update
self.arg = arg
def _copy(self) -> ScalarElementColumnDefault:
return ScalarElementColumnDefault(
arg=self.arg, for_update=self.for_update
)
_SQLExprDefault = Union["ColumnElement[Any]", "TextClause"]
class ColumnElementColumnDefault(ColumnDefault):
"""default generator for a SQL expression
.. versionadded:: 2.0
"""
is_clause_element = True
arg: _SQLExprDefault
def __init__(
self,
arg: _SQLExprDefault,
for_update: bool = False,
) -> None:
self.for_update = for_update
self.arg = arg
def _copy(self) -> ColumnElementColumnDefault:
return ColumnElementColumnDefault(
arg=self.arg, for_update=self.for_update
)
@util.memoized_property
@util.preload_module("sqlalchemy.sql.sqltypes")
def _arg_is_typed(self) -> bool:
sqltypes = util.preloaded.sql_sqltypes
return not isinstance(self.arg.type, sqltypes.NullType)
class _CallableColumnDefaultProtocol(Protocol):
def __call__(self, context: ExecutionContext) -> Any:
...
class CallableColumnDefault(ColumnDefault):
"""default generator for a callable Python function
.. versionadded:: 2.0
"""
is_callable = True
arg: _CallableColumnDefaultProtocol
def __init__(
self,
arg: Union[_CallableColumnDefaultProtocol, Callable[[], Any]],
for_update: bool = False,
) -> None:
self.for_update = for_update
self.arg = self._maybe_wrap_callable(arg)
def _copy(self) -> CallableColumnDefault:
return CallableColumnDefault(arg=self.arg, for_update=self.for_update)
def _maybe_wrap_callable(
self, fn: Union[_CallableColumnDefaultProtocol, Callable[[], Any]]
) -> _CallableColumnDefaultProtocol:
"""Wrap callables that don't accept a context.
This is to allow easy compatibility with default callables
that aren't specific to accepting of a context.
"""
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
return util.wrap_callable(lambda ctx: fn(), fn) # type: ignore
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
if positionals == 0:
return util.wrap_callable(lambda ctx: fn(), fn) # type: ignore
elif positionals == 1:
return fn # type: ignore
else:
raise exc.ArgumentError(
"ColumnDefault Python function takes zero or one "
"positional arguments"
)
class IdentityOptions:
"""Defines options for a named database sequence or an identity column.
.. versionadded:: 1.3.18
.. seealso::
:class:`.Sequence`
"""
def __init__(
self,
start: Optional[int] = None,
increment: Optional[int] = None,
minvalue: Optional[int] = None,
maxvalue: Optional[int] = None,
nominvalue: Optional[bool] = None,
nomaxvalue: Optional[bool] = None,
cycle: Optional[bool] = None,
cache: Optional[int] = None,
order: Optional[bool] = None,
) -> None:
"""Construct a :class:`.IdentityOptions` object.
See the :class:`.Sequence` documentation for a complete description
of the parameters.
:param start: the starting index of the sequence.
:param increment: the increment value of the sequence.
:param minvalue: the minimum value of the sequence.
:param maxvalue: the maximum value of the sequence.
:param nominvalue: no minimum value of the sequence.
:param nomaxvalue: no maximum value of the sequence.
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance.
:param order: optional boolean value; if ``True``, renders the
ORDER keyword.
"""
self.start = start
self.increment = increment
self.minvalue = minvalue
self.maxvalue = maxvalue
self.nominvalue = nominvalue
self.nomaxvalue = nomaxvalue
self.cycle = cycle
self.cache = cache
self.order = order
class Sequence(HasSchemaAttr, IdentityOptions, DefaultGenerator):
"""Represents a named database sequence.
The :class:`.Sequence` object represents the name and configurational
parameters of a database sequence. It also represents
a construct that can be "executed" by a SQLAlchemy :class:`_engine.Engine`
or :class:`_engine.Connection`,
rendering the appropriate "next value" function
for the target database and returning a result.
The :class:`.Sequence` is typically associated with a primary key column::
some_table = Table(
'some_table', metadata,
Column('id', Integer, Sequence('some_table_seq', start=1),
primary_key=True)
)
When CREATE TABLE is emitted for the above :class:`_schema.Table`, if the
target platform supports sequences, a CREATE SEQUENCE statement will
be emitted as well. For platforms that don't support sequences,
the :class:`.Sequence` construct is ignored.
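A :class:`.Sequence` may also be invoked standalone to fetch its next
value; a sketch, assuming ``engine`` refers to a backend with
sequence support::

    seq = Sequence("my_seq", start=1)
    with engine.connect() as conn:
        next_id = conn.scalar(seq)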
.. seealso::
:ref:`defaults_sequences`
:class:`.CreateSequence`
:class:`.DropSequence`
"""
__visit_name__ = "sequence"
is_sequence = True
column: Optional[Column[Any]]
data_type: Optional[TypeEngine[int]]
def __init__(
self,
name: str,
start: Optional[int] = None,
increment: Optional[int] = None,
minvalue: Optional[int] = None,
maxvalue: Optional[int] = None,
nominvalue: Optional[bool] = None,
nomaxvalue: Optional[bool] = None,
cycle: Optional[bool] = None,
schema: Optional[Union[str, Literal[SchemaConst.BLANK_SCHEMA]]] = None,
cache: Optional[int] = None,
order: Optional[bool] = None,
data_type: Optional[_TypeEngineArgument[int]] = None,
optional: bool = False,
quote: Optional[bool] = None,
metadata: Optional[MetaData] = None,
quote_schema: Optional[bool] = None,
for_update: bool = False,
) -> None:
"""Construct a :class:`.Sequence` object.
:param name: the name of the sequence.
:param start: the starting index of the sequence. This value is
used when the CREATE SEQUENCE command is emitted to the database
as the value of the "START WITH" clause. If ``None``, the
clause is omitted, which on most platforms indicates a starting
value of 1.
.. versionchanged:: 2.0 The :paramref:`.Sequence.start` parameter
is required in order to have DDL emit "START WITH". This is a
reversal of a change made in version 1.4 which would implicitly
render "START WITH 1" if the :paramref:`.Sequence.start` were
not included. See :ref:`change_7211` for more detail.
:param increment: the increment value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "INCREMENT BY" clause. If ``None``,
the clause is omitted, which on most platforms indicates an
increment of 1.
:param minvalue: the minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param maxvalue: the maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param nominvalue: no minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param nomaxvalue: no maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached by an ascending or descending sequence
respectively. This value is used when the CREATE SEQUENCE command
is emitted to the database as the "CYCLE" clause. If the limit is
reached, the next number generated will be the minvalue or maxvalue,
respectively. If cycle=False (the default), any calls to nextval
after the sequence has reached its maximum value will return an
error.
.. versionadded:: 1.0.7
:param schema: optional schema name for the sequence, if located
in a schema other than the default. The rules for selecting the
schema name when a :class:`_schema.MetaData`
is also present are the same
as that of :paramref:`_schema.Table.schema`.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance. Renders the CACHE keyword
understood by Oracle and PostgreSQL.
.. versionadded:: 1.1.12
:param order: optional boolean value; if ``True``, renders the
ORDER keyword, understood by Oracle, indicating the sequence is
definitively ordered. May be necessary to provide deterministic
ordering using Oracle RAC.
.. versionadded:: 1.1.12
:param data_type: The type to be returned by the sequence, for
dialects that allow us to choose between INTEGER, BIGINT, etc.
(e.g., mssql).
.. versionadded:: 1.4.0
:param optional: boolean value, when ``True``, indicates that this
:class:`.Sequence` object only needs to be explicitly generated
on backends that don't provide another way to generate primary
key identifiers. Currently, it essentially means, "don't create
this sequence on the PostgreSQL backend, where the SERIAL keyword
creates a sequence for us automatically".
:param quote: boolean value, when ``True`` or ``False``, explicitly
forces quoting of the :paramref:`_schema.Sequence.name` on or off.
When left at its default of ``None``, normal quoting rules based
on casing and reserved words take place.
:param quote_schema: Set the quoting preferences for the ``schema``
name.
:param metadata: optional :class:`_schema.MetaData` object which this
:class:`.Sequence` will be associated with. A :class:`.Sequence`
that is associated with a :class:`_schema.MetaData`
gains the following
capabilities:
* The :class:`.Sequence` will inherit the
:paramref:`_schema.MetaData.schema`
parameter specified to the target :class:`_schema.MetaData`, which
affects the production of CREATE / DROP DDL, if any.
* The :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods
automatically use the engine bound to the :class:`_schema.MetaData`
object, if any.
* The :meth:`_schema.MetaData.create_all` and
:meth:`_schema.MetaData.drop_all`
methods will emit CREATE / DROP for this :class:`.Sequence`,
even if the :class:`.Sequence` is not associated with any
:class:`_schema.Table` / :class:`_schema.Column`
that's a member of this
:class:`_schema.MetaData`.
The above behaviors can only occur if the :class:`.Sequence` is
explicitly associated with the :class:`_schema.MetaData`
via this parameter.
.. seealso::
:ref:`sequence_metadata` - full discussion of the
:paramref:`.Sequence.metadata` parameter.
:param for_update: Indicates this :class:`.Sequence`, when associated
with a :class:`_schema.Column`,
should be invoked for UPDATE statements
on that column's table, rather than for INSERT statements, when
no value is otherwise present for that column in the statement.
"""
DefaultGenerator.__init__(self, for_update=for_update)
IdentityOptions.__init__(
self,
start=start,
increment=increment,
minvalue=minvalue,
maxvalue=maxvalue,
nominvalue=nominvalue,
nomaxvalue=nomaxvalue,
cycle=cycle,
cache=cache,
order=order,
)
self.column = None
self.name = quoted_name(name, quote)
self.optional = optional
if schema is BLANK_SCHEMA:
self.schema = schema = None
elif metadata is not None and schema is None and metadata.schema:
self.schema = schema = metadata.schema
else:
self.schema = quoted_name.construct(schema, quote_schema)
self.metadata = metadata
self._key = _get_table_key(name, schema)
if metadata:
self._set_metadata(metadata)
if data_type is not None:
self.data_type = to_instance(data_type)
else:
self.data_type = None
@util.preload_module("sqlalchemy.sql.functions")
def next_value(self) -> Function[int]:
"""Return a :class:`.next_value` function element
which will render the appropriate increment function
for this :class:`.Sequence` within any SQL expression.
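For example, a minimal usage sketch, assuming ``engine`` refers to a
backend that supports sequences and that the top-level ``select()``
construct is imported; the sequence name is illustrative::
    seq = Sequence("user_id_seq")
    with engine.connect() as conn:
        next_id = conn.execute(select(seq.next_value())).scalar()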
"""
return util.preloaded.sql_functions.func.next_value(self)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
column = parent
assert isinstance(column, Column)
super()._set_parent(column)
column._on_table_attach(self._set_table)
def _copy(self) -> Sequence:
return Sequence(
name=self.name,
start=self.start,
increment=self.increment,
minvalue=self.minvalue,
maxvalue=self.maxvalue,
nominvalue=self.nominvalue,
nomaxvalue=self.nomaxvalue,
cycle=self.cycle,
schema=self.schema,
cache=self.cache,
order=self.order,
data_type=self.data_type,
optional=self.optional,
metadata=self.metadata,
for_update=self.for_update,
)
def _set_table(self, column: Column[Any], table: Table) -> None:
self._set_metadata(table.metadata)
def _set_metadata(self, metadata: MetaData) -> None:
self.metadata = metadata
self.metadata._sequences[self._key] = self
def create(self, bind: _CreateDropBind, checkfirst: bool = True) -> None:
"""Creates this sequence in the database."""
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(self, bind: _CreateDropBind, checkfirst: bool = True) -> None:
"""Drops this sequence from the database."""
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def _not_a_column_expr(self) -> NoReturn:
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression. Use func.next_value(sequence) "
"to produce a 'next value' function that's usable "
"as a column element." % self.__class__.__name__
)
@inspection._self_inspects
class FetchedValue(SchemaEventTarget):
"""A marker for a transparent database-side default.
Use :class:`.FetchedValue` when the database is configured
to provide some automatic default for a column.
E.g.::
Column('foo', Integer, FetchedValue())
Would indicate that some trigger or default generator
will create a new value for the ``foo`` column during an
INSERT.
.. seealso::
:ref:`triggered_columns`
"""
is_server_default = True
reflected = False
has_argument = False
is_clause_element = False
column: Optional[Column[Any]]
def __init__(self, for_update: bool = False) -> None:
self.for_update = for_update
def _as_for_update(self, for_update: bool) -> FetchedValue:
if for_update == self.for_update:
return self
else:
return self._clone(for_update) # type: ignore
def _copy(self) -> FetchedValue:
return FetchedValue(self.for_update)
def _clone(self, for_update: bool) -> Any:
n = self.__class__.__new__(self.__class__)
n.__dict__.update(self.__dict__)
n.__dict__.pop("column", None)
n.for_update = for_update
return n
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
column = parent
assert isinstance(column, Column)
self.column = column
if self.for_update:
self.column.server_onupdate = self
else:
self.column.server_default = self
def __repr__(self) -> str:
return util.generic_repr(self)
class DefaultClause(FetchedValue):
"""A DDL-specified DEFAULT column value.
:class:`.DefaultClause` is a :class:`.FetchedValue`
that also generates a "DEFAULT" clause when
"CREATE TABLE" is emitted.
:class:`.DefaultClause` is generated automatically
whenever the ``server_default``, ``server_onupdate`` arguments of
:class:`_schema.Column` are used. A :class:`.DefaultClause`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, server_default="50")
Is equivalent to::
Column('foo', Integer, DefaultClause("50"))
"""
has_argument = True
def __init__(
self,
arg: Union[str, ClauseElement, TextClause],
for_update: bool = False,
_reflected: bool = False,
) -> None:
util.assert_arg_type(arg, (str, ClauseElement, TextClause), "arg")
super().__init__(for_update)
self.arg = arg
self.reflected = _reflected
def _copy(self) -> DefaultClause:
return DefaultClause(
arg=self.arg, for_update=self.for_update, _reflected=self.reflected
)
def __repr__(self) -> str:
return "DefaultClause(%r, for_update=%r)" % (self.arg, self.for_update)
class Constraint(DialectKWArgs, HasConditionalDDL, SchemaItem):
"""A table-level SQL constraint.
:class:`_schema.Constraint` serves as the base class for the series of
constraint objects that can be associated with :class:`_schema.Table`
objects, including :class:`_schema.PrimaryKeyConstraint`,
:class:`_schema.ForeignKeyConstraint`,
:class:`_schema.UniqueConstraint`, and
:class:`_schema.CheckConstraint`.
"""
__visit_name__ = "constraint"
_creation_order: int
_column_flag: bool
def __init__(
self,
name: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
info: Optional[_InfoType] = None,
comment: Optional[str] = None,
_create_rule: Optional[Any] = None,
_type_bound: bool = False,
**dialect_kw: Any,
) -> None:
r"""Create a SQL constraint.
:param name:
Optional, the in-database name of this ``Constraint``.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param comment: Optional string that will render an SQL comment on
foreign key constraint creation.
.. versionadded:: 2.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
:param _create_rule:
used internally by some datatypes that also create constraints.
:param _type_bound:
used internally to indicate that this constraint is associated with
a specific datatype.
"""
self.name = name
self.deferrable = deferrable
self.initially = initially
if info:
self.info = info
self._create_rule = _create_rule
self._type_bound = _type_bound
util.set_creation_order(self)
self._validate_dialect_kwargs(dialect_kw)
self.comment = comment
def _should_create_for_compiler(
self, compiler: DDLCompiler, **kw: Any
) -> bool:
if self._create_rule is not None and not self._create_rule(compiler):
return False
elif self._ddl_if is not None:
return self._ddl_if._should_execute(
ddl.CreateConstraint(self), self, None, compiler=compiler, **kw
)
else:
return True
@property
def table(self) -> Table:
try:
if isinstance(self.parent, Table):
return self.parent
except AttributeError:
pass
raise exc.InvalidRequestError(
"This constraint is not bound to a table. Did you "
"mean to call table.append_constraint(constraint) ?"
)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, (Table, Column))
self.parent = parent
parent.constraints.add(self)
@util.deprecated(
"1.4",
"The :meth:`_schema.Constraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self: Self, **kw: Any) -> Self:
return self._copy(**kw) # type: ignore
def _copy(self: Self, **kw: Any) -> Self:
raise NotImplementedError()
class ColumnCollectionMixin:
"""A :class:`_expression.ColumnCollection` of :class:`_schema.Column`
objects.
This collection represents the columns which are referred to by
this object.
"""
_columns: DedupeColumnCollection[Column[Any]]
_allow_multiple_tables = False
_pending_colargs: List[Optional[Union[str, Column[Any]]]]
if TYPE_CHECKING:
def _set_parent_with_dispatch(
self, parent: SchemaEventTarget, **kw: Any
) -> None:
...
def __init__(
self,
*columns: _DDLColumnArgument,
_autoattach: bool = True,
_column_flag: bool = False,
_gather_expressions: Optional[
List[Union[str, ColumnElement[Any]]]
] = None,
) -> None:
self._column_flag = _column_flag
self._columns = DedupeColumnCollection()
processed_expressions: Optional[
List[Union[ColumnElement[Any], str]]
] = _gather_expressions
if processed_expressions is not None:
self._pending_colargs = []
for (
expr,
_,
_,
add_element,
) in coercions.expect_col_expression_collection(
roles.DDLConstraintColumnRole, columns
):
self._pending_colargs.append(add_element)
processed_expressions.append(expr)
else:
self._pending_colargs = [
coercions.expect(roles.DDLConstraintColumnRole, column)
for column in columns
]
if _autoattach and self._pending_colargs:
self._check_attach()
def _check_attach(self, evt: bool = False) -> None:
col_objs = [c for c in self._pending_colargs if isinstance(c, Column)]
cols_w_table = [c for c in col_objs if isinstance(c.table, Table)]
cols_wo_table = set(col_objs).difference(cols_w_table)
if cols_wo_table:
# feature #3341 - place event listeners for Column objects
# such that when all those cols are attached, we autoattach.
assert not evt, "Should not reach here on event call"
# issue #3411 - don't do the per-column auto-attach if some of the
# columns are specified as strings.
has_string_cols = {
c for c in self._pending_colargs if c is not None
}.difference(col_objs)
if not has_string_cols:
def _col_attached(column: Column[Any], table: Table) -> None:
# this isinstance() corresponds with the
# isinstance() above; only want to count Table-bound
# columns
if isinstance(table, Table):
cols_wo_table.discard(column)
if not cols_wo_table:
self._check_attach(evt=True)
self._cols_wo_table = cols_wo_table
for col in cols_wo_table:
col._on_table_attach(_col_attached)
return
columns = cols_w_table
tables = {c.table for c in columns}
if len(tables) == 1:
self._set_parent_with_dispatch(tables.pop())
elif len(tables) > 1 and not self._allow_multiple_tables:
table = columns[0].table
others = [c for c in columns[1:] if c.table is not table]
if others:
raise exc.ArgumentError(
"Column(s) %s are not part of table '%s'."
% (
", ".join("'%s'" % c for c in others),
table.description,
)
)
@util.ro_memoized_property
def columns(self) -> ReadOnlyColumnCollection[str, Column[Any]]:
return self._columns.as_readonly()
@util.ro_memoized_property
def c(self) -> ReadOnlyColumnCollection[str, Column[Any]]:
return self._columns.as_readonly()
def _col_expressions(
self, parent: Union[Table, Column[Any]]
) -> List[Optional[Column[Any]]]:
if isinstance(parent, Column):
result: List[Optional[Column[Any]]] = [
c for c in self._pending_colargs if isinstance(c, Column)
]
assert len(result) == len(self._pending_colargs)
return result
else:
return [
parent.c[col] if isinstance(col, str) else col
for col in self._pending_colargs
]
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, (Table, Column))
for col in self._col_expressions(parent):
if col is not None:
self._columns.add(col)
class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint):
"""A constraint that proxies a ColumnCollection."""
def __init__(
self,
*columns: _DDLColumnArgument,
name: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
info: Optional[_InfoType] = None,
_autoattach: bool = True,
_column_flag: bool = False,
_gather_expressions: Optional[List[_DDLColumnArgument]] = None,
**dialect_kw: Any,
) -> None:
r"""
:param \*columns:
A sequence of column names or Column objects.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param \**dialect_kw: other keyword arguments including
dialect-specific arguments are propagated to the :class:`.Constraint`
superclass.
"""
Constraint.__init__(
self,
name=name,
deferrable=deferrable,
initially=initially,
info=info,
**dialect_kw,
)
ColumnCollectionMixin.__init__(
self, *columns, _autoattach=_autoattach, _column_flag=_column_flag
)
columns: ReadOnlyColumnCollection[str, Column[Any]]
"""A :class:`_expression.ColumnCollection` representing the set of columns
for this constraint.
"""
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, (Column, Table))
Constraint._set_parent(self, parent)
ColumnCollectionMixin._set_parent(self, parent)
def __contains__(self, x: Any) -> bool:
return x in self._columns
@util.deprecated(
"1.4",
"The :meth:`_schema.ColumnCollectionConstraint.copy` method "
"is deprecated and will be removed in a future release.",
)
def copy(
self,
*,
target_table: Optional[Table] = None,
**kw: Any,
) -> ColumnCollectionConstraint:
return self._copy(target_table=target_table, **kw)
def _copy(
self,
*,
target_table: Optional[Table] = None,
**kw: Any,
) -> ColumnCollectionConstraint:
# ticket #5276
constraint_kwargs = {}
for dialect_name in self.dialect_options:
dialect_options = self.dialect_options[dialect_name]._non_defaults
for (
dialect_option_key,
dialect_option_value,
) in dialect_options.items():
constraint_kwargs[
dialect_name + "_" + dialect_option_key
] = dialect_option_value
assert isinstance(self.parent, Table)
c = self.__class__(
name=self.name,
deferrable=self.deferrable,
initially=self.initially,
*[
_copy_expression(expr, self.parent, target_table)
for expr in self._columns
],
comment=self.comment,
**constraint_kwargs,
)
return self._schema_item_copy(c)
def contains_column(self, col: Column[Any]) -> bool:
"""Return True if this constraint contains the given column.
Note that this object also contains an attribute ``.columns``
which is a :class:`_expression.ColumnCollection` of
:class:`_schema.Column` objects.
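E.g., a brief sketch, assuming ``my_table`` is a :class:`_schema.Table`
with a primary key column named ``id``::
    is_pk = my_table.primary_key.contains_column(my_table.c.id)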
"""
return self._columns.contains_column(col)
def __iter__(self) -> Iterator[Column[Any]]:
return iter(self._columns)
def __len__(self) -> int:
return len(self._columns)
class CheckConstraint(ColumnCollectionConstraint):
"""A table- or column-level CHECK constraint.
Can be included in the definition of a Table or Column.
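E.g., a sketch of both forms; the table and column names are
illustrative::
    Table(
        "payments", metadata_obj,
        Column("amount", Integer, CheckConstraint("amount > 0")),
        CheckConstraint("amount < 1000000", name="ck_payments_max"),
    )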
"""
_allow_multiple_tables = True
__visit_name__ = "table_or_column_check_constraint"
@_document_text_coercion(
"sqltext",
":class:`.CheckConstraint`",
":paramref:`.CheckConstraint.sqltext`",
)
def __init__(
self,
sqltext: _TextCoercedExpressionArgument[Any],
name: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
table: Optional[Table] = None,
info: Optional[_InfoType] = None,
_create_rule: Optional[Any] = None,
_autoattach: bool = True,
_type_bound: bool = False,
**dialect_kw: Any,
) -> None:
r"""Construct a CHECK constraint.
:param sqltext:
A string containing the constraint definition, which will be used
verbatim, or a SQL expression construct. If given as a string,
the object is converted to a :func:`_expression.text` object.
If the textual
string includes a colon character, escape this using a backslash::
CheckConstraint(r"foo ~ E'a(?\:b|c)d")
:param name:
Optional, the in-database name of the constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
"""
self.sqltext = coercions.expect(roles.DDLExpressionRole, sqltext)
columns: List[Column[Any]] = []
visitors.traverse(self.sqltext, {}, {"column": columns.append})
super().__init__(
name=name,
deferrable=deferrable,
initially=initially,
_create_rule=_create_rule,
info=info,
_type_bound=_type_bound,
_autoattach=_autoattach,
*columns,
**dialect_kw,
)
if table is not None:
self._set_parent_with_dispatch(table)
@property
def is_column_level(self) -> bool:
return not isinstance(self.parent, Table)
@util.deprecated(
"1.4",
"The :meth:`_schema.CheckConstraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(
self, *, target_table: Optional[Table] = None, **kw: Any
) -> CheckConstraint:
return self._copy(target_table=target_table, **kw)
def _copy(
self, *, target_table: Optional[Table] = None, **kw: Any
) -> CheckConstraint:
if target_table is not None:
# note that target_table is None for the copy process of
# a column-bound CheckConstraint, so this path is not reached
# in that case.
sqltext = _copy_expression(self.sqltext, self.table, target_table)
else:
sqltext = self.sqltext
c = CheckConstraint(
sqltext,
name=self.name,
initially=self.initially,
deferrable=self.deferrable,
_create_rule=self._create_rule,
table=target_table,
comment=self.comment,
_autoattach=False,
_type_bound=self._type_bound,
)
return self._schema_item_copy(c)
class ForeignKeyConstraint(ColumnCollectionConstraint):
"""A table-level FOREIGN KEY constraint.
Defines a single column or composite FOREIGN KEY ... REFERENCES
constraint. For a no-frills, single column foreign key, adding a
:class:`_schema.ForeignKey` to the definition of a :class:`_schema.Column`
is a
shorthand equivalent for an unnamed, single column
:class:`_schema.ForeignKeyConstraint`.
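For example, a sketch of a composite constraint; the table and column
names are illustrative::
    Table(
        "invoice_item", metadata_obj,
        Column("invoice_id", Integer),
        Column("ref_num", Integer),
        ForeignKeyConstraint(
            ["invoice_id", "ref_num"],
            ["invoice.invoice_id", "invoice.ref_num"],
        ),
    )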
Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
"""
__visit_name__ = "foreign_key_constraint"
def __init__(
self,
columns: _typing_Sequence[_DDLColumnArgument],
refcolumns: _typing_Sequence[_DDLColumnArgument],
name: Optional[str] = None,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
use_alter: bool = False,
link_to_name: bool = False,
match: Optional[str] = None,
table: Optional[Table] = None,
info: Optional[_InfoType] = None,
comment: Optional[str] = None,
**dialect_kw: Any,
) -> None:
r"""Construct a composite-capable FOREIGN KEY.
:param columns: A sequence of local column names. The named columns
must be defined and present in the parent Table. The names should
match the ``key`` given to each column (defaults to the name) unless
``link_to_name`` is True.
:param refcolumns: A sequence of foreign column names or Column
objects. The columns must all be located within the same Table.
:param name: Optional, the in-database name of the key.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally assigned
``key``.
:param use_alter: If True, do not emit the DDL for this constraint as
part of the CREATE TABLE definition. Instead, generate it via an
ALTER TABLE statement issued after the full collection of tables
have been created, and drop it via an ALTER TABLE statement before
the full collection of tables are dropped.
The use of :paramref:`_schema.ForeignKeyConstraint.use_alter` is
particularly geared towards the case where two or more tables
are established within a mutually-dependent foreign key constraint
relationship; however, the :meth:`_schema.MetaData.create_all` and
:meth:`_schema.MetaData.drop_all`
methods will perform this resolution
automatically, so the flag is normally not needed.
.. versionchanged:: 1.0.0 Automatic resolution of foreign key
cycles has been added, removing the need to use the
:paramref:`_schema.ForeignKeyConstraint.use_alter` in typical use
cases.
.. seealso::
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param comment: Optional string that will render an SQL comment on
foreign key constraint creation.
.. versionadded:: 2.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
.. versionadded:: 0.9.2
"""
Constraint.__init__(
self,
name=name,
deferrable=deferrable,
initially=initially,
info=info,
comment=comment,
**dialect_kw,
)
self.onupdate = onupdate
self.ondelete = ondelete
self.link_to_name = link_to_name
self.use_alter = use_alter
self.match = match
if len(set(columns)) != len(refcolumns):
if len(set(columns)) != len(columns):
# e.g. FOREIGN KEY (a, a) REFERENCES r (b, c)
raise exc.ArgumentError(
"ForeignKeyConstraint with duplicate source column "
"references are not supported."
)
else:
# e.g. FOREIGN KEY (a) REFERENCES r (b, c)
# paraphrasing
# https://www.postgresql.org/docs/current/static/ddl-constraints.html
raise exc.ArgumentError(
"ForeignKeyConstraint number "
"of constrained columns must match the number of "
"referenced columns."
)
# standalone ForeignKeyConstraint - create
# associated ForeignKey objects which will be applied to hosted
# Column objects (in col.foreign_keys), either now or when attached
# to the Table for string-specified names
self.elements = [
ForeignKey(
refcol,
_constraint=self,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
link_to_name=self.link_to_name,
match=self.match,
deferrable=self.deferrable,
initially=self.initially,
**self.dialect_kwargs,
)
for refcol in refcolumns
]
ColumnCollectionMixin.__init__(self, *columns)
if table is not None:
if hasattr(self, "parent"):
assert table is self.parent
self._set_parent_with_dispatch(table)
def _append_element(self, column: Column[Any], fk: ForeignKey) -> None:
self._columns.add(column)
self.elements.append(fk)
columns: ReadOnlyColumnCollection[str, Column[Any]]
"""A :class:`_expression.ColumnCollection` representing the set of columns
for this constraint.
"""
elements: List[ForeignKey]
"""A sequence of :class:`_schema.ForeignKey` objects.
Each :class:`_schema.ForeignKey`
represents a single referring column/referred
column pair.
This collection is intended to be read-only.
"""
@property
def _elements(self) -> util.OrderedDict[str, ForeignKey]:
# legacy - provide a dictionary view of (column_key, fk)
return util.OrderedDict(zip(self.column_keys, self.elements))
@property
def _referred_schema(self) -> Optional[str]:
for elem in self.elements:
return elem._referred_schema
else:
return None
@property
def referred_table(self) -> Table:
"""The :class:`_schema.Table` object to which this
:class:`_schema.ForeignKeyConstraint` references.
This is a dynamically calculated attribute which may not be available
if the constraint and/or parent table is not yet associated with
a metadata collection that contains the referred table.
.. versionadded:: 1.0.0
"""
return self.elements[0].column.table
def _validate_dest_table(self, table: Table) -> None:
table_keys = {elem._table_key() for elem in self.elements}
if None not in table_keys and len(table_keys) > 1:
elem0, elem1 = sorted(table_keys)[0:2]
raise exc.ArgumentError(
"ForeignKeyConstraint on %s(%s) refers to "
"multiple remote tables: %s and %s"
% (table.fullname, self._col_description, elem0, elem1)
)
@property
def column_keys(self) -> _typing_Sequence[str]:
"""Return a list of string keys representing the local
columns in this :class:`_schema.ForeignKeyConstraint`.
This list is either the original string arguments sent
to the constructor of the :class:`_schema.ForeignKeyConstraint`,
or if the constraint has been initialized with :class:`_schema.Column`
objects, is the string ``.key`` of each element.
.. versionadded:: 1.0.0
"""
if hasattr(self, "parent"):
return self._columns.keys()
else:
return [
col.key if isinstance(col, ColumnElement) else str(col)
for col in self._pending_colargs
]
@property
def _col_description(self) -> str:
return ", ".join(self.column_keys)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
table = parent
assert isinstance(table, Table)
Constraint._set_parent(self, table)
try:
ColumnCollectionConstraint._set_parent(self, table)
except KeyError as ke:
raise exc.ArgumentError(
"Can't create ForeignKeyConstraint "
"on table '%s': no column "
"named '%s' is present." % (table.description, ke.args[0])
) from ke
for col, fk in zip(self._columns, self.elements):
if not hasattr(fk, "parent") or fk.parent is not col:
fk._set_parent_with_dispatch(col)
self._validate_dest_table(table)
@util.deprecated(
"1.4",
"The :meth:`_schema.ForeignKeyConstraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(
self,
*,
schema: Optional[str] = None,
target_table: Optional[Table] = None,
**kw: Any,
) -> ForeignKeyConstraint:
return self._copy(schema=schema, target_table=target_table, **kw)
def _copy(
self,
*,
schema: Optional[str] = None,
target_table: Optional[Table] = None,
**kw: Any,
) -> ForeignKeyConstraint:
fkc = ForeignKeyConstraint(
[x.parent.key for x in self.elements],
[
x._get_colspec(
schema=schema,
table_name=target_table.name
if target_table is not None
and x._table_key() == x.parent.table.key
else None,
_is_copy=True,
)
for x in self.elements
],
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
comment=self.comment,
)
for self_fk, other_fk in zip(self.elements, fkc.elements):
self_fk._schema_item_copy(other_fk)
return self._schema_item_copy(fkc)
class PrimaryKeyConstraint(ColumnCollectionConstraint):
"""A table-level PRIMARY KEY constraint.
The :class:`.PrimaryKeyConstraint` object is present automatically
on any :class:`_schema.Table` object; it is assigned a set of
:class:`_schema.Column` objects corresponding to those marked with
the :paramref:`_schema.Column.primary_key` flag::
>>> my_table = Table('mytable', metadata,
... Column('id', Integer, primary_key=True),
... Column('version_id', Integer, primary_key=True),
... Column('data', String(50))
... )
>>> my_table.primary_key
PrimaryKeyConstraint(
Column('id', Integer(), table=<mytable>,
primary_key=True, nullable=False),
Column('version_id', Integer(), table=<mytable>,
primary_key=True, nullable=False)
)
The primary key of a :class:`_schema.Table` can also be specified by using
a :class:`.PrimaryKeyConstraint` object explicitly; in this mode of usage,
the "name" of the constraint can also be specified, as well as other
options which may be recognized by dialects::
my_table = Table('mytable', metadata,
Column('id', Integer),
Column('version_id', Integer),
Column('data', String(50)),
PrimaryKeyConstraint('id', 'version_id',
name='mytable_pk')
)
The two styles of column specification should generally not be mixed.
A warning is emitted if the columns present in the
:class:`.PrimaryKeyConstraint`
don't match the columns that were marked as ``primary_key=True``, if both
are present; in this case, the columns are taken strictly from the
:class:`.PrimaryKeyConstraint` declaration, and those columns otherwise
marked as ``primary_key=True`` are ignored. This behavior is
maintained for backwards compatibility.
.. versionchanged:: 0.9.2 Using a mixture of columns within a
:class:`.PrimaryKeyConstraint` in addition to columns marked as
``primary_key=True`` now emits a warning if the lists don't match.
The ultimate behavior of ignoring those columns marked with the flag
only is currently maintained for backwards compatibility; this warning
may raise an exception in a future release.
For the use case where specific options are to be specified on the
:class:`.PrimaryKeyConstraint`, but the usual style of using
``primary_key=True`` flags is still desirable, an empty
:class:`.PrimaryKeyConstraint` may be specified, which will take on the
primary key column collection from the :class:`_schema.Table` based on the
flags::
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('version_id', Integer, primary_key=True),
Column('data', String(50)),
PrimaryKeyConstraint(name='mytable_pk',
mssql_clustered=True)
)
.. versionadded:: 0.9.2 an empty :class:`.PrimaryKeyConstraint` may now
be specified for the purposes of establishing keyword arguments with
the constraint, independently of the specification of "primary key"
columns within the :class:`_schema.Table` itself; columns marked as
``primary_key=True`` will be gathered into the empty constraint's
column collection.
"""
__visit_name__ = "primary_key_constraint"
def __init__(
self,
*columns: _DDLColumnArgument,
name: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
info: Optional[_InfoType] = None,
_implicit_generated: bool = False,
**dialect_kw: Any,
) -> None:
self._implicit_generated = _implicit_generated
super().__init__(
*columns,
name=name,
deferrable=deferrable,
initially=initially,
info=info,
**dialect_kw,
)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
table = parent
assert isinstance(table, Table)
super()._set_parent(table)
if table.primary_key is not self:
table.constraints.discard(table.primary_key)
table.primary_key = self # type: ignore
table.constraints.add(self)
table_pks = [c for c in table.c if c.primary_key]
if (
self._columns
and table_pks
and set(table_pks) != set(self._columns)
):
util.warn(
"Table '%s' specifies columns %s as primary_key=True, "
"not matching locally specified columns %s; setting the "
"current primary key columns to %s. This warning "
"may become an exception in a future release"
% (
table.name,
", ".join("'%s'" % c.name for c in table_pks),
", ".join("'%s'" % c.name for c in self._columns),
", ".join("'%s'" % c.name for c in self._columns),
)
)
table_pks[:] = []
for c in self._columns:
c.primary_key = True
if c._user_defined_nullable is NULL_UNSPECIFIED:
c.nullable = False
if table_pks:
self._columns.extend(table_pks)
def _reload(self, columns: Iterable[Column[Any]]) -> None:
"""repopulate this :class:`.PrimaryKeyConstraint` given
a set of columns.
Existing columns in the table that are marked as primary_key=True
are maintained.
Also fires a new event.
This is basically like putting a whole new
:class:`.PrimaryKeyConstraint` object on the parent
:class:`_schema.Table` object without actually replacing the object.
The ordering of the given list of columns is also maintained; these
columns will be appended to the list of columns after any which
are already present.
"""
# set the primary key flag on new columns.
# note any existing PK cols on the table also have their
# flag still set.
for col in columns:
col.primary_key = True
self._columns.extend(columns)
PrimaryKeyConstraint._autoincrement_column._reset(self) # type: ignore
self._set_parent_with_dispatch(self.table)
def _replace(self, col: Column[Any]) -> None:
PrimaryKeyConstraint._autoincrement_column._reset(self) # type: ignore
self._columns.replace(col)
self.dispatch._sa_event_column_added_to_pk_constraint(self, col)
@property
def columns_autoinc_first(self) -> List[Column[Any]]:
autoinc = self._autoincrement_column
if autoinc is not None:
return [autoinc] + [c for c in self._columns if c is not autoinc]
else:
return list(self._columns)
@util.ro_memoized_property
def _autoincrement_column(self) -> Optional[Column[Any]]:
def _validate_autoinc(col: Column[Any], autoinc_true: bool) -> bool:
if col.type._type_affinity is None or not issubclass(
col.type._type_affinity,
(
type_api.INTEGERTYPE._type_affinity,
type_api.NUMERICTYPE._type_affinity,
),
):
if autoinc_true:
raise exc.ArgumentError(
"Column type %s on column '%s' is not "
"compatible with autoincrement=True" % (col.type, col)
)
else:
return False
elif (
not isinstance(col.default, (type(None), Sequence))
and not autoinc_true
):
return False
elif (
col.server_default is not None
and not isinstance(col.server_default, Identity)
and not autoinc_true
):
return False
elif col.foreign_keys and col.autoincrement not in (
True,
"ignore_fk",
):
return False
return True
if len(self._columns) == 1:
col = list(self._columns)[0]
if col.autoincrement is True:
_validate_autoinc(col, True)
return col
elif col.autoincrement in (
"auto",
"ignore_fk",
) and _validate_autoinc(col, False):
return col
else:
return None
else:
autoinc = None
for col in self._columns:
if col.autoincrement is True:
_validate_autoinc(col, True)
if autoinc is not None:
raise exc.ArgumentError(
"Only one Column may be marked "
"autoincrement=True, found both %s and %s."
% (col.name, autoinc.name)
)
else:
autoinc = col
return autoinc
class UniqueConstraint(ColumnCollectionConstraint):
"""A table-level UNIQUE constraint.
Defines a single column or composite UNIQUE constraint. For a no-frills,
single column constraint, adding ``unique=True`` to the ``Column``
definition is a shorthand equivalent for an unnamed, single column
UniqueConstraint.
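E.g., a sketch contrasting the two forms; all table and column names
are illustrative::
    Table(
        "user", metadata_obj,
        Column("username", String(30), unique=True),
    )
    Table(
        "address", metadata_obj,
        Column("user_id", Integer),
        Column("email", String(100)),
        UniqueConstraint("user_id", "email", name="uq_address_user_email"),
    )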
"""
__visit_name__ = "unique_constraint"
class Index(
DialectKWArgs, ColumnCollectionMixin, HasConditionalDDL, SchemaItem
):
"""A table-level INDEX.
Defines a composite (one or more column) INDEX.
E.g.::
sometable = Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100))
)
Index("some_index", sometable.c.name)
For a no-frills, single column index, adding
:class:`_schema.Column` also supports ``index=True``::
sometable = Table("sometable", metadata,
Column("name", String(50), index=True)
)
For a composite index, multiple columns can be specified::
Index("some_index", sometable.c.name, sometable.c.address)
Functional indexes are supported as well, typically by using the
:data:`.func` construct in conjunction with table-bound
:class:`_schema.Column` objects::
Index("some_index", func.lower(sometable.c.name))
An :class:`.Index` can also be manually associated with a
:class:`_schema.Table`,
either through inline declaration or using
:meth:`_schema.Table.append_constraint`. When this approach is used,
the names
of the indexed columns can be specified as strings::
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", "name", "address")
)
To support functional or expression-based indexes in this form, the
:func:`_expression.text` construct may be used::
from sqlalchemy import text
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", text("lower(name)"))
)
.. versionadded:: 0.9.5 the :func:`_expression.text`
construct may be used to
specify :class:`.Index` expressions, provided the :class:`.Index`
is explicitly associated with the :class:`_schema.Table`.
.. seealso::
:ref:`schema_indexes` - General information on :class:`.Index`.
:ref:`postgresql_indexes` - PostgreSQL-specific options available for
the :class:`.Index` construct.
:ref:`mysql_indexes` - MySQL-specific options available for the
:class:`.Index` construct.
:ref:`mssql_indexes` - MSSQL-specific options available for the
:class:`.Index` construct.
"""
__visit_name__ = "index"
table: Optional[Table]
expressions: _typing_Sequence[Union[str, ColumnElement[Any]]]
_table_bound_expressions: _typing_Sequence[ColumnElement[Any]]
def __init__(
self,
name: Optional[str],
*expressions: _DDLColumnArgument,
unique: bool = False,
quote: Optional[bool] = None,
info: Optional[_InfoType] = None,
_table: Optional[Table] = None,
_column_flag: bool = False,
**dialect_kw: Any,
) -> None:
r"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`_schema.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`_schema.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`_schema.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**dialect_kw: Additional keyword arguments not mentioned above
are dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = table = None
self.name = quoted_name.construct(name, quote)
self.unique = unique
if info is not None:
self.info = info
# TODO: consider "table" argument being public, but for
# the purpose of the fix here, it starts as private.
if _table is not None:
table = _table
self._validate_dialect_kwargs(dialect_kw)
self.expressions = []
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(
self,
*expressions,
_column_flag=_column_flag,
_gather_expressions=self.expressions,
)
if table is not None:
self._set_parent(table)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
table = parent
assert isinstance(table, Table)
ColumnCollectionMixin._set_parent(self, table)
if self.table is not None and table is not self.table:
raise exc.ArgumentError(
"Index '%s' is against table '%s', and "
"cannot be associated with table '%s'."
% (self.name, self.table.description, table.description)
)
self.table = table
table.indexes.add(self)
expressions = self.expressions
col_expressions = self._col_expressions(table)
assert len(expressions) == len(col_expressions)
exprs = []
for expr, colexpr in zip(expressions, col_expressions):
if isinstance(expr, ClauseElement):
exprs.append(expr)
elif colexpr is not None:
exprs.append(colexpr)
else:
assert False
self.expressions = self._table_bound_expressions = exprs
def create(self, bind: _CreateDropBind, checkfirst: bool = False) -> None:
"""Issue a ``CREATE`` statement for this
:class:`.Index`, using the given
:class:`.Connection` or :class:`.Engine` for connectivity.
.. seealso::
:meth:`_schema.MetaData.create_all`.
"""
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(self, bind: _CreateDropBind, checkfirst: bool = False) -> None:
"""Issue a ``DROP`` statement for this
:class:`.Index`, using the given
:class:`.Connection` or :class:`.Engine` for connectivity.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def __repr__(self) -> str:
exprs: _typing_Sequence[Any] # noqa: F842
return "Index(%s)" % (
", ".join(
[repr(self.name)]
+ [repr(e) for e in self.expressions]
+ (self.unique and ["unique=True"] or [])
)
)
DEFAULT_NAMING_CONVENTION: util.immutabledict[str, str] = util.immutabledict(
{"ix": "ix_%(column_0_label)s"}
)
class MetaData(HasSchemaAttr):
"""A collection of :class:`_schema.Table`
objects and their associated schema
constructs.
Holds a collection of :class:`_schema.Table` objects as well as
an optional binding to an :class:`_engine.Engine` or
:class:`_engine.Connection`. If bound, the :class:`_schema.Table` objects
in the collection and their columns may participate in implicit SQL
execution.
The :class:`_schema.Table` objects themselves are stored in the
:attr:`_schema.MetaData.tables` dictionary.
:class:`_schema.MetaData` is a thread-safe object for read operations.
Construction of new tables within a single :class:`_schema.MetaData`
object,
either explicitly or via reflection, may not be completely thread-safe.
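E.g., a minimal sketch; the table and column names are illustrative::
    metadata_obj = MetaData()
    user_table = Table(
        "user", metadata_obj,
        Column("id", Integer, primary_key=True),
    )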
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
"""
__visit_name__ = "metadata"
def __init__(
self,
schema: Optional[str] = None,
quote_schema: Optional[bool] = None,
naming_convention: Optional[Dict[str, str]] = None,
info: Optional[_InfoType] = None,
) -> None:
"""Create a new MetaData object.
:param schema:
The default schema to use for the :class:`_schema.Table`,
:class:`.Sequence`, and potentially other objects associated with
this :class:`_schema.MetaData`. Defaults to ``None``.
.. seealso::
:ref:`schema_metadata_schema_name` - details on how the
:paramref:`_schema.MetaData.schema` parameter is used.
:paramref:`_schema.Table.schema`
:paramref:`.Sequence.schema`
:param quote_schema:
Sets the ``quote_schema`` flag for those :class:`_schema.Table`,
:class:`.Sequence`, and other objects which make usage of the
local ``schema`` name.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param naming_convention: a dictionary referring to values which
will establish default naming conventions for :class:`.Constraint`
and :class:`.Index` objects, for those objects which are not given
a name explicitly.
The keys of this dictionary may be:
* a constraint or Index class, e.g. the :class:`.UniqueConstraint`,
:class:`_schema.ForeignKeyConstraint` class, the :class:`.Index`
class
* a string mnemonic for one of the known constraint classes;
``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``, ``"uq"`` for foreign key,
primary key, index, check, and unique constraint, respectively.
* the string name of a user-defined "token" that can be used
to define new naming tokens.
The values associated with each "constraint class" or "constraint
mnemonic" key are string naming templates, such as
``"uq_%(table_name)s_%(column_0_name)s"``,
which describe how the name should be composed. The values
associated with user-defined "token" keys should be callables of the
form ``fn(constraint, table)``, which accepts the constraint/index
object and :class:`_schema.Table` as arguments, returning a string
result.
The built-in names are as follows, some of which may only be
available for certain types of constraint:
* ``%(table_name)s`` - the name of the :class:`_schema.Table`
object
associated with the constraint.
* ``%(referred_table_name)s`` - the name of the
:class:`_schema.Table`
object associated with the referencing target of a
:class:`_schema.ForeignKeyConstraint`.
* ``%(column_0_name)s`` - the name of the :class:`_schema.Column`
at
index position "0" within the constraint.
* ``%(column_0N_name)s`` - the name of all :class:`_schema.Column`
objects in order within the constraint, joined without a
separator.
* ``%(column_0_N_name)s`` - the name of all
:class:`_schema.Column`
objects in order within the constraint, joined with an
underscore as a separator.
* ``%(column_0_label)s``, ``%(column_0N_label)s``,
``%(column_0_N_label)s`` - the label of either the zeroth
:class:`_schema.Column` or all :class:`.Columns`, separated with
or without an underscore
* ``%(column_0_key)s``, ``%(column_0N_key)s``,
``%(column_0_N_key)s`` - the key of either the zeroth
:class:`_schema.Column` or all :class:`.Columns`, separated with
or without an underscore
* ``%(referred_column_0_name)s``, ``%(referred_column_0N_name)s``,
``%(referred_column_0_N_name)s``, ``%(referred_column_0_key)s``,
``%(referred_column_0N_key)s``, ... column tokens which
render the names/keys/labels of columns that are referenced
by a :class:`_schema.ForeignKeyConstraint`.
* ``%(constraint_name)s`` - a special key that refers to the
existing name given to the constraint. When this key is
present, the :class:`.Constraint` object's existing name will be
replaced with one that is composed from template string that
uses this token. When this token is present, it is required that
the :class:`.Constraint` is given an explicit name ahead of time.
* user-defined: any additional token may be implemented by passing
it along with a ``fn(constraint, table)`` callable to the
naming_convention dictionary.
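For example, a sketch of a convention dictionary using the mnemonics
and tokens described above; the template strings are illustrative::
    metadata_obj = MetaData(naming_convention={
        "ix": "ix_%(column_0_label)s",
        "uq": "uq_%(table_name)s_%(column_0_name)s",
        "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    })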
.. versionadded:: 1.3.0 - added new ``%(column_0N_name)s``,
``%(column_0_N_name)s``, and related tokens that produce
concatenations of names, keys, or labels for all columns referred
to by a given constraint.
.. seealso::
:ref:`constraint_naming_conventions` - for detailed usage
examples.
"""
self.tables = util.FacadeDict()
self.schema = quoted_name.construct(schema, quote_schema)
self.naming_convention = (
naming_convention
if naming_convention
else DEFAULT_NAMING_CONVENTION
)
if info:
self.info = info
self._schemas: Set[str] = set()
self._sequences: Dict[str, Sequence] = {}
self._fk_memos: Dict[
Tuple[str, Optional[str]], List[ForeignKey]
] = collections.defaultdict(list)
tables: util.FacadeDict[str, Table]
"""A dictionary of :class:`_schema.Table`
objects keyed to their name or "table key".
The exact key is that determined by the :attr:`_schema.Table.key`
attribute;
for a table with no :attr:`_schema.Table.schema` attribute,
this is the same
as :attr:`_schema.Table.name`. For a table with a schema,
it is typically of the
form ``schemaname.tablename``.
.. seealso::
:attr:`_schema.MetaData.sorted_tables`
"""
def __repr__(self) -> str:
return "MetaData()"
def __contains__(self, table_or_key: Union[str, Table]) -> bool:
if not isinstance(table_or_key, str):
table_or_key = table_or_key.key
return table_or_key in self.tables
def _add_table(
self, name: str, schema: Optional[str], table: Table
) -> None:
key = _get_table_key(name, schema)
self.tables._insert_item(key, table)
if schema:
self._schemas.add(schema)
def _remove_table(self, name: str, schema: Optional[str]) -> None:
key = _get_table_key(name, schema)
removed = dict.pop(self.tables, key, None) # type: ignore
if removed is not None:
for fk in removed.foreign_keys:
fk._remove_from_metadata(self)
if self._schemas:
self._schemas = {
t.schema for t in self.tables.values() if t.schema is not None
}
def __getstate__(self) -> Dict[str, Any]:
return {
"tables": self.tables,
"schema": self.schema,
"schemas": self._schemas,
"sequences": self._sequences,
"fk_memos": self._fk_memos,
"naming_convention": self.naming_convention,
}
def __setstate__(self, state: Dict[str, Any]) -> None:
self.tables = state["tables"]
self.schema = state["schema"]
self.naming_convention = state["naming_convention"]
self._sequences = state["sequences"]
self._schemas = state["schemas"]
self._fk_memos = state["fk_memos"]
def clear(self) -> None:
"""Clear all Table objects from this MetaData."""
dict.clear(self.tables) # type: ignore
self._schemas.clear()
self._fk_memos.clear()
def remove(self, table: Table) -> None:
"""Remove the given Table object from this MetaData."""
self._remove_table(table.name, table.schema)
@property
def sorted_tables(self) -> List[Table]:
"""Returns a list of :class:`_schema.Table` objects sorted in order of
foreign key dependency.
The sorting will place :class:`_schema.Table`
objects that are depended upon first, before the tables that
depend upon them, representing the
order in which they can be created.
the tables would be dropped, use the ``reversed()`` Python built-in.
.. warning::
The :attr:`.MetaData.sorted_tables` attribute cannot by itself
accommodate automatic resolution of dependency cycles between
tables, which are usually caused by mutually dependent foreign key
constraints. When these cycles are detected, the foreign keys
of these tables are omitted from consideration in the sort.
A warning is emitted when this condition occurs, which will be an
exception raise in a future release. Tables which are not part
of the cycle will still be returned in dependency order.
To resolve these cycles, the
:paramref:`_schema.ForeignKeyConstraint.use_alter` parameter may be
applied to those constraints which create a cycle. Alternatively,
the :func:`_schema.sort_tables_and_constraints` function will
automatically return foreign key constraints in a separate
collection when cycles are detected so that they may be applied
to a schema separately.
.. versionchanged:: 1.3.17 - a warning is emitted when
:attr:`.MetaData.sorted_tables` cannot perform a proper sort
due to cyclical dependencies. This will be an exception in a
future release. Additionally, the sort will continue to return
other tables not involved in the cycle in dependency order which
was not the case previously.
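For example, a sketch of emitting DROP in reverse dependency order,
assuming ``engine`` is an :class:`_engine.Engine`::
    for table in reversed(metadata_obj.sorted_tables):
        table.drop(engine, checkfirst=True)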
.. seealso::
:func:`_schema.sort_tables`
:func:`_schema.sort_tables_and_constraints`
:attr:`_schema.MetaData.tables`
:meth:`_reflection.Inspector.get_table_names`
:meth:`_reflection.Inspector.get_sorted_table_and_fkc_names`
"""
return ddl.sort_tables(
sorted(self.tables.values(), key=lambda t: t.key) # type: ignore
)
@util.preload_module("sqlalchemy.engine.reflection")
def reflect(
self,
bind: Union[Engine, Connection],
schema: Optional[str] = None,
views: bool = False,
only: Optional[_typing_Sequence[str]] = None,
extend_existing: bool = False,
autoload_replace: bool = True,
resolve_fks: bool = True,
**dialect_kwargs: Any,
) -> None:
r"""Load all available table definitions from the database.
Automatically creates ``Table`` entries in this ``MetaData`` for any
table available in the database but not yet present in the
``MetaData``. May be called multiple times to pick up tables recently
added to the database; however, no special action is taken if a table
in this ``MetaData`` no longer exists in the database.
:param bind:
A :class:`.Connection` or :class:`.Engine` used to access the
database.
:param schema:
Optional, query and reflect tables from an alternate schema.
If None, the schema associated with this :class:`_schema.MetaData`
is used, if any.
:param views:
If True, also reflect views (materialized and plain).
:param only:
Optional. Load only a sub-set of available named tables. May be
specified as a sequence of names or a callable.
If a sequence of names is provided, only those tables will be
reflected. An error is raised if a table is requested but not
available. Named tables already present in this ``MetaData`` are
ignored.
If a callable is provided, it will be used as a boolean predicate to
filter the list of potential table names. The callable is called
with a table name and this ``MetaData`` instance as positional
arguments and should return a true value for any table to reflect.
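For example, a hypothetical predicate that reflects only tables with
a given prefix; the prefix is illustrative::
    metadata_obj.reflect(engine, only=lambda name, meta: name.startswith("acct_"))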
:param extend_existing: Passed along to each :class:`_schema.Table` as
:paramref:`_schema.Table.extend_existing`.
.. versionadded:: 0.9.1
:param autoload_replace: Passed along to each :class:`_schema.Table`
as
:paramref:`_schema.Table.autoload_replace`.
.. versionadded:: 0.9.1
:param resolve_fks: if True, reflect :class:`_schema.Table`
objects linked
to :class:`_schema.ForeignKey` objects located in each
:class:`_schema.Table`.
For :meth:`_schema.MetaData.reflect`,
this has the effect of reflecting
related tables that might otherwise not be in the list of tables
being reflected, for example if the referenced table is in a
different schema or is omitted via the
:paramref:`.MetaData.reflect.only` parameter. When False,
:class:`_schema.ForeignKey` objects are not followed to the
:class:`_schema.Table`
in which they link, however if the related table is also part of the
list of tables that would be reflected in any case, the
:class:`_schema.ForeignKey` object will still resolve to its related
:class:`_schema.Table` after the :meth:`_schema.MetaData.reflect`
operation is
complete. Defaults to True.
.. versionadded:: 1.3.0
.. seealso::
:paramref:`_schema.Table.resolve_fks`
:param \**dialect_kwargs: Additional keyword arguments not mentioned
above are dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. versionadded:: 0.9.2 - Added
:paramref:`.MetaData.reflect.**dialect_kwargs` to support
dialect-level reflection options for all :class:`_schema.Table`
objects reflected.
"""
with inspection.inspect(bind)._inspection_context() as insp:
reflect_opts: Any = {
"autoload_with": insp,
"extend_existing": extend_existing,
"autoload_replace": autoload_replace,
"resolve_fks": resolve_fks,
"_extend_on": set(),
}
reflect_opts.update(dialect_kwargs)
if schema is None:
schema = self.schema
if schema is not None:
reflect_opts["schema"] = schema
kind = util.preloaded.engine_reflection.ObjectKind.TABLE
available: util.OrderedSet[str] = util.OrderedSet(
insp.get_table_names(schema)
)
if views:
kind = util.preloaded.engine_reflection.ObjectKind.ANY
available.update(insp.get_view_names(schema))
try:
available.update(insp.get_materialized_view_names(schema))
except NotImplementedError:
pass
if schema is not None:
available_w_schema: util.OrderedSet[str] = util.OrderedSet(
[f"{schema}.{name}" for name in available]
)
else:
available_w_schema = available
current = set(self.tables)
if only is None:
load = [
name
for name, schname in zip(available, available_w_schema)
if extend_existing or schname not in current
]
elif callable(only):
load = [
name
for name, schname in zip(available, available_w_schema)
if (extend_existing or schname not in current)
and only(name, self)
]
else:
missing = [name for name in only if name not in available]
if missing:
                    s = " schema '%s'" % schema if schema else ""
raise exc.InvalidRequestError(
"Could not reflect: requested table(s) not available "
"in %r%s: (%s)" % (bind.engine, s, ", ".join(missing))
)
load = [
name
for name in only
if extend_existing or name not in current
]
# pass the available tables so the inspector can
# choose to ignore the filter_names
_reflect_info = insp._get_reflection_info(
schema=schema,
filter_names=load,
available=available,
kind=kind,
scope=util.preloaded.engine_reflection.ObjectScope.ANY,
**dialect_kwargs,
)
reflect_opts["_reflect_info"] = _reflect_info
for name in load:
try:
Table(name, self, **reflect_opts)
except exc.UnreflectableTableError as uerr:
util.warn("Skipping table %s: %s" % (name, uerr))
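    # Editorial sketch, not part of the original module: reflecting a
    # filtered subset of tables via the ``only`` callable described in the
    # docstring above.  The engine URL and the "user_" prefix are
    # hypothetical.
    #
    #     from sqlalchemy import MetaData, create_engine
    #
    #     engine = create_engine("postgresql://scott:tiger@localhost/test")
    #     metadata_obj = MetaData()
    #     metadata_obj.reflect(
    #         bind=engine,
    #         views=True,
    #         only=lambda name, meta: name.startswith("user_"),
    #     )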
def create_all(
self,
bind: _CreateDropBind,
tables: Optional[_typing_Sequence[Table]] = None,
checkfirst: bool = True,
) -> None:
"""Create all tables stored in this metadata.
Conditional by default, will not attempt to recreate tables already
present in the target database.
:param bind:
A :class:`.Connection` or :class:`.Engine` used to access the
database.
:param tables:
Optional list of ``Table`` objects, which is a subset of the total
tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, don't issue CREATEs for tables already present
in the target database.
"""
bind._run_ddl_visitor(
ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
)
def drop_all(
self,
bind: _CreateDropBind,
tables: Optional[_typing_Sequence[Table]] = None,
checkfirst: bool = True,
) -> None:
"""Drop all tables stored in this metadata.
Conditional by default, will not attempt to drop tables not present in
the target database.
:param bind:
A :class:`.Connection` or :class:`.Engine` used to access the
database.
:param tables:
Optional list of ``Table`` objects, which is a subset of the
total tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, only issue DROPs for tables confirmed to be
present in the target database.
"""
bind._run_ddl_visitor(
ddl.SchemaDropper, self, checkfirst=checkfirst, tables=tables
)
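    # Editorial sketch, not part of the original module: a typical
    # create/drop round trip.  The SQLite URL is a stand-in for any
    # supported backend.
    #
    #     engine = create_engine("sqlite://")
    #     metadata_obj.create_all(engine)                 # CREATE missing tables
    #     metadata_obj.drop_all(engine, checkfirst=True)  # DROP existing tables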
class Computed(FetchedValue, SchemaItem):
"""Defines a generated column, i.e. "GENERATED ALWAYS AS" syntax.
The :class:`.Computed` construct is an inline construct added to the
argument list of a :class:`_schema.Column` object::
from sqlalchemy import Computed
Table('square', metadata_obj,
Column('side', Float, nullable=False),
Column('area', Float, Computed('side * side'))
)
See the linked documentation below for complete details.
.. versionadded:: 1.3.11
.. seealso::
:ref:`computed_ddl`
"""
__visit_name__ = "computed_column"
column: Optional[Column[Any]]
@_document_text_coercion(
"sqltext", ":class:`.Computed`", ":paramref:`.Computed.sqltext`"
)
def __init__(
self, sqltext: _DDLColumnArgument, persisted: Optional[bool] = None
) -> None:
"""Construct a GENERATED ALWAYS AS DDL construct to accompany a
:class:`_schema.Column`.
:param sqltext:
A string containing the column generation expression, which will be
used verbatim, or a SQL expression construct, such as a
:func:`_expression.text`
object. If given as a string, the object is converted to a
:func:`_expression.text` object.
:param persisted:
Optional, controls how this column should be persisted by the
database. Possible values are:
* ``None``, the default, it will use the default persistence
defined by the database.
* ``True``, will render ``GENERATED ALWAYS AS ... STORED``, or the
equivalent for the target database if supported.
* ``False``, will render ``GENERATED ALWAYS AS ... VIRTUAL``, or
the equivalent for the target database if supported.
Specifying ``True`` or ``False`` may raise an error when the DDL
is emitted to the target database if the database does not support
that persistence option. Leaving this parameter at its default
of ``None`` is guaranteed to succeed for all databases that support
``GENERATED ALWAYS AS``.
"""
self.sqltext = coercions.expect(roles.DDLExpressionRole, sqltext)
self.persisted = persisted
self.column = None
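    # Editorial sketch, not part of the original module: requesting a
    # stored generated column explicitly.  Emitting this DDL raises on
    # backends that lack a STORED (or equivalent) persistence option.
    #
    #     Column('area', Float, Computed('side * side', persisted=True))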
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, Column)
if not isinstance(
parent.server_default, (type(None), Computed)
) or not isinstance(parent.server_onupdate, (type(None), Computed)):
raise exc.ArgumentError(
"A generated column cannot specify a server_default or a "
"server_onupdate argument"
)
self.column = parent
parent.computed = self
self.column.server_onupdate = self
self.column.server_default = self
def _as_for_update(self, for_update: bool) -> FetchedValue:
return self
@util.deprecated(
"1.4",
"The :meth:`_schema.Computed.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(
self, *, target_table: Optional[Table] = None, **kw: Any
) -> Computed:
return self._copy(target_table=target_table, **kw)
def _copy(
self, *, target_table: Optional[Table] = None, **kw: Any
) -> Computed:
sqltext = _copy_expression(
self.sqltext,
self.column.table if self.column is not None else None,
target_table,
)
g = Computed(sqltext, persisted=self.persisted)
return self._schema_item_copy(g)
class Identity(IdentityOptions, FetchedValue, SchemaItem):
"""Defines an identity column, i.e. "GENERATED { ALWAYS | BY DEFAULT }
AS IDENTITY" syntax.
The :class:`.Identity` construct is an inline construct added to the
argument list of a :class:`_schema.Column` object::
from sqlalchemy import Identity
Table('foo', metadata_obj,
            Column('id', Integer, Identity()),
Column('description', Text),
)
See the linked documentation below for complete details.
.. versionadded:: 1.4
.. seealso::
:ref:`identity_ddl`
"""
__visit_name__ = "identity_column"
def __init__(
self,
always: bool = False,
on_null: Optional[bool] = None,
start: Optional[int] = None,
increment: Optional[int] = None,
minvalue: Optional[int] = None,
maxvalue: Optional[int] = None,
nominvalue: Optional[bool] = None,
nomaxvalue: Optional[bool] = None,
cycle: Optional[bool] = None,
cache: Optional[int] = None,
order: Optional[bool] = None,
) -> None:
"""Construct a GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY DDL
construct to accompany a :class:`_schema.Column`.
See the :class:`.Sequence` documentation for a complete description
of most parameters.
.. note::
            MSSQL supports this construct as the preferred alternative to
            generate an IDENTITY on a column, but it uses non-standard
            syntax that only supports :paramref:`_schema.Identity.start`
            and :paramref:`_schema.Identity.increment`.
            All other parameters are ignored.
:param always:
A boolean, that indicates the type of identity column.
If ``False`` is specified, the default, then the user-specified
value takes precedence.
          If ``True`` is specified, a user-specified value is not accepted
          (on some backends, such as PostgreSQL, OVERRIDING SYSTEM VALUE or
          similar may be specified in an INSERT to override the sequence
          value).
Some backends also have a default value for this parameter,
``None`` can be used to omit rendering this part in the DDL. It
will be treated as ``False`` if a backend does not have a default
value.
:param on_null:
Set to ``True`` to specify ON NULL in conjunction with a
``always=False`` identity column. This option is only supported on
some backends, like Oracle.
:param start: the starting index of the sequence.
:param increment: the increment value of the sequence.
:param minvalue: the minimum value of the sequence.
:param maxvalue: the maximum value of the sequence.
:param nominvalue: no minimum value of the sequence.
:param nomaxvalue: no maximum value of the sequence.
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance.
:param order: optional boolean value; if true, renders the
ORDER keyword.
"""
IdentityOptions.__init__(
self,
start=start,
increment=increment,
minvalue=minvalue,
maxvalue=maxvalue,
nominvalue=nominvalue,
nomaxvalue=nomaxvalue,
cycle=cycle,
cache=cache,
order=order,
)
self.always = always
self.on_null = on_null
self.column = None
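    # Editorial sketch, not part of the original module: a GENERATED
    # ALWAYS identity with an explicit start and increment, per the
    # parameter descriptions above.
    #
    #     Column('id', Integer,
    #            Identity(always=True, start=100, increment=10))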
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, Column)
if not isinstance(
parent.server_default, (type(None), Identity)
) or not isinstance(parent.server_onupdate, type(None)):
raise exc.ArgumentError(
"A column with an Identity object cannot specify a "
"server_default or a server_onupdate argument"
)
if parent.autoincrement is False:
raise exc.ArgumentError(
"A column with an Identity object cannot specify "
"autoincrement=False"
)
self.column = parent
parent.identity = self
if parent._user_defined_nullable is NULL_UNSPECIFIED:
parent.nullable = False
parent.server_default = self
def _as_for_update(self, for_update: bool) -> FetchedValue:
return self
@util.deprecated(
"1.4",
"The :meth:`_schema.Identity.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, **kw: Any) -> Identity:
return self._copy(**kw)
def _copy(self, **kw: Any) -> Identity:
i = Identity(
always=self.always,
on_null=self.on_null,
start=self.start,
increment=self.increment,
minvalue=self.minvalue,
maxvalue=self.maxvalue,
nominvalue=self.nominvalue,
nomaxvalue=self.nomaxvalue,
cycle=self.cycle,
cache=self.cache,
order=self.order,
)
return self._schema_item_copy(i)
| {
"content_hash": "5b9db55bc713ec12292af40b6a907cd1",
"timestamp": "",
"source": "github",
"line_count": 5793,
"max_line_length": 101,
"avg_line_length": 37.26532021405144,
"alnum_prop": 0.5906669507777541,
"repo_name": "zzzeek/sqlalchemy",
"id": "f1caf79be8335d39f3585d558d1c46e96bcebb96",
"size": "216113",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "lib/sqlalchemy/sql/schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "21698"
},
{
"name": "Python",
"bytes": "16838583"
}
],
"symlink_target": ""
} |
"""Renderers that render RDFValues into JSON compatible data structures."""
import base64
import numbers
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.lib.rdfvalues import structs
class ApiValueRenderer(object):
"""Baseclass for API renderers that render RDFValues."""
__metaclass__ = registry.MetaclassRegistry
value_class = object
_type_list_cache = {}
_renderers_cache = {}
@classmethod
def GetRendererForValue(cls, value, with_types=False, with_metadata=False,
limit_lists=-1):
"""Returns renderer corresponding to a given value and rendering args."""
cache_key = "%s_%s_%s_%d" % (value.__class__.__name__,
with_types,
with_metadata,
limit_lists)
try:
renderer_cls = cls._renderers_cache[cache_key]
except KeyError:
candidates = []
for candidate in ApiValueRenderer.classes.values():
if candidate.value_class:
candidate_class = candidate.value_class
else:
continue
if isinstance(value, candidate_class):
candidates.append((candidate, candidate_class))
if not candidates:
raise RuntimeError("No renderer found for value %s." %
value.__class__.__name__)
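      # A longer MRO for the matched value_class means a more derived
      # class, so sorting by MRO length and taking the last candidate
      # prefers the most specific registered renderer.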
candidates = sorted(candidates,
key=lambda candidate: len(candidate[1].mro()))
renderer_cls = candidates[-1][0]
cls._renderers_cache[cache_key] = renderer_cls
return renderer_cls(with_types=with_types,
with_metadata=with_metadata,
limit_lists=limit_lists)
def __init__(self, with_types=False, with_metadata=False,
limit_lists=-1):
super(ApiValueRenderer, self).__init__()
self.with_types = with_types
self.with_metadata = with_metadata
self.limit_lists = limit_lists
def _PassThrough(self, value):
renderer = ApiValueRenderer.GetRendererForValue(
value, with_types=self.with_types, with_metadata=self.with_metadata,
limit_lists=self.limit_lists)
return renderer.RenderValue(value)
def _GetTypeList(self, value):
try:
return ApiValueRenderer._type_list_cache[value.__class__.__name__]
except KeyError:
type_list = [klass.__name__ for klass in value.__class__.__mro__]
ApiValueRenderer._type_list_cache[value.__class__.__name__] = type_list
return type_list
def _IncludeTypeInfoIfNeeded(self, result, original_value):
# If type information is needed, converted value is placed in the
# resulting dictionary under the 'value' key.
if self.with_types:
if hasattr(original_value, "age"):
age = original_value.age.AsSecondsFromEpoch()
else:
age = 0
return dict(type=original_value.__class__.__name__,
mro=self._GetTypeList(original_value),
value=result,
age=age)
else:
return result
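  # Editorial sketch, not part of the original module: with
  # with_types=True a rendered RDFString comes back wrapped roughly as
  #
  #     {"type": "RDFString",
  #      "mro": ["RDFString", "RDFValue", ...],
  #      "value": u"example",
  #      "age": 0}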
def RenderValue(self, value):
return self._IncludeTypeInfoIfNeeded(utils.SmartUnicode(value), value)
class ApiNumberRenderer(ApiValueRenderer):
"""Renderer for numbers."""
value_class = numbers.Number
def RenderValue(self, value):
# Numbers are returned as-is.
return self._IncludeTypeInfoIfNeeded(value, value)
class ApiStringRenderer(ApiValueRenderer):
"""Renderer for strings."""
value_class = basestring
def RenderValue(self, value):
return self._IncludeTypeInfoIfNeeded(utils.SmartUnicode(value), value)
class ApiBytesRenderer(ApiValueRenderer):
"""Renderer for RDFBytes."""
value_class = bytes
def RenderValue(self, value):
result = base64.b64encode(value)
return self._IncludeTypeInfoIfNeeded(result, value)
class ApiEnumRenderer(ApiValueRenderer):
"""Renderer for deprecated (old-style) enums."""
value_class = structs.Enum
def RenderValue(self, value):
return self._IncludeTypeInfoIfNeeded(value.name, value)
class ApiEnumNamedValueRenderer(ApiValueRenderer):
"""Renderer for new-style enums."""
value_class = structs.EnumNamedValue
def RenderValue(self, value):
return self._IncludeTypeInfoIfNeeded(value.name, value)
class ApiDictRenderer(ApiValueRenderer):
"""Renderer for dicts."""
value_class = dict
def RenderValue(self, value):
result = {}
for k, v in value.items():
result[k] = self._PassThrough(v)
return result
class ApiListRenderer(ApiValueRenderer):
"""Renderer for lists."""
value_class = list
def RenderValue(self, value):
if self.limit_lists == 0:
return "<lists are omitted>"
elif self.limit_lists == -1:
return [self._PassThrough(v) for v in value]
else:
result = [self._PassThrough(v) for v in list(value)[:self.limit_lists]]
if len(value) > self.limit_lists:
if self.with_types:
result.append(dict(age=0,
mro=["FetchMoreLink"],
type="FetchMoreLink",
url="to/be/implemented"))
else:
result.append("<more items available>")
return result
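  # Editorial sketch, not part of the original module: with limit_lists=2
  # a five-element list renders as
  #
  #     [<rendered>, <rendered>, "<more items available>"]
  #
  # (or a FetchMoreLink dict in place of the string when with_types=True).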
class ApiRepeatedFieldHelperRenderer(ApiListRenderer):
"""Renderer for repeated fields helpers."""
value_class = structs.RepeatedFieldHelper
class ApiRDFValueArrayRenderer(ApiListRenderer):
"""Renderer for RDFValueArray."""
value_class = rdfvalue.RDFValueArray
class ApiRDFBoolRenderer(ApiValueRenderer):
"""Renderer for RDFBool."""
value_class = rdfvalue.RDFBool
def RenderValue(self, value):
return self._IncludeTypeInfoIfNeeded(value != 0, value)
class ApiRDFBytesRenderer(ApiValueRenderer):
"""Renderer for RDFBytes."""
value_class = rdfvalue.RDFBytes
def RenderValue(self, value):
result = base64.b64encode(value.SerializeToString())
return self._IncludeTypeInfoIfNeeded(result, value)
class ApiRDFStringRenderer(ApiValueRenderer):
"""Renderer for RDFString."""
value_class = rdfvalue.RDFString
def RenderValue(self, value):
result = utils.SmartUnicode(value)
return self._IncludeTypeInfoIfNeeded(result, value)
class ApiRDFIntegerRenderer(ApiValueRenderer):
"""Renderer for RDFInteger."""
value_class = rdfvalue.RDFInteger
def RenderValue(self, value):
result = int(value)
return self._IncludeTypeInfoIfNeeded(result, value)
class ApiFlowStateRenderer(ApiValueRenderer):
"""Renderer for FlowState."""
value_class = rdfvalue.FlowState
def RenderValue(self, value):
return self._PassThrough(value.data)
class ApiDataBlobRenderer(ApiValueRenderer):
"""Renderer for DataBlob."""
value_class = rdfvalue.DataBlob
def RenderValue(self, value):
return self._PassThrough(value.GetValue())
class ApiEmbeddedRDFValueRenderer(ApiValueRenderer):
"""Renderer for EmbeddedRDFValue."""
value_class = rdfvalue.EmbeddedRDFValue
def RenderValue(self, value):
return self._PassThrough(value.payload)
class ApiRDFProtoStructRenderer(ApiValueRenderer):
"""Renderer for RDFProtoStructs."""
value_class = rdfvalue.RDFProtoStruct
processors = []
descriptors_cache = {}
def RenderValue(self, value):
    result = value.AsDict()
    # items() returns a snapshot list in Python 2, so rewriting values in
    # place while iterating is safe.
    for k, v in result.items():
      result[k] = self._PassThrough(v)
for processor in self.processors:
result = processor(self, result, value)
result = self._IncludeTypeInfoIfNeeded(result, value)
if self.with_metadata:
try:
descriptors, order = self.descriptors_cache[value.__class__.__name__]
except KeyError:
descriptors = {}
order = []
for descriptor, _ in value.ListFields():
order.append(descriptor.name)
descriptors[descriptor.name] = {
"friendly_name": descriptor.friendly_name,
"description": descriptor.description
}
self.descriptors_cache[value.__class__.__name__] = (descriptors,
order)
result["metadata"] = descriptors
result["fields_order"] = order
return result
class ApiGrrMessageRenderer(ApiRDFProtoStructRenderer):
"""Renderer for GrrMessage objects."""
value_class = rdfvalue.GrrMessage
def RenderPayload(self, result, value):
"""Renders GrrMessage payload and renames args_rdf_name field."""
if "args_rdf_name" in result:
result["payload_type"] = result["args_rdf_name"]
del result["args_rdf_name"]
if "args" in result:
result["payload"] = self._PassThrough(value.payload)
del result["args"]
return result
processors = [RenderPayload]
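# Editorial note: ``processors`` is the post-processing hook consumed by
# ApiRDFProtoStructRenderer.RenderValue; each callable receives the
# renderer, the partially rendered dict and the original value, and
# returns the (possibly rewritten) dict, as RenderPayload does above.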
def RenderValue(value, with_types=False, with_metadata=False,
limit_lists=-1):
if value is None:
return None
renderer = ApiValueRenderer.GetRendererForValue(value,
with_types=with_types,
with_metadata=with_metadata,
limit_lists=limit_lists)
return renderer.RenderValue(value)
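# Editorial sketch, not part of the original module: rendering an
# arbitrary RDFValue for the JSON API.  The sample value is hypothetical.
#
#     value = rdfvalue.RDFString("example")
#     rendered = RenderValue(value, with_types=True, limit_lists=10)
#     # -> {"type": "RDFString", "mro": [...], "value": u"example", "age": 0}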
| {
"content_hash": "70075e9ef55a356908425af41dd3d50d",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 78,
"avg_line_length": 27.41317365269461,
"alnum_prop": 0.6503931847968545,
"repo_name": "wandec/grr",
"id": "9ff468d54f0c93dff2486019e20af61685b283a8",
"size": "9178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/api_value_renderers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "37532"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "34902"
},
{
"name": "JavaScript",
"bytes": "837191"
},
{
"name": "Makefile",
"bytes": "5597"
},
{
"name": "Protocol Buffer",
"bytes": "170955"
},
{
"name": "Python",
"bytes": "4770273"
},
{
"name": "Ruby",
"bytes": "2200"
},
{
"name": "Shell",
"bytes": "48612"
}
],
"symlink_target": ""
} |