text stringlengths 4 1.02M | meta dict |
|---|---|
from bs4 import BeautifulSoup
from collections import OrderedDict
from datetime import datetime
from json import load as parse_json, dump as dump_json
from os import stat
from os.path import basename
from urllib.parse import quote as urlquote
import optparse
import requests
# Module-level configuration, loaded once at import time from ./config.json.
# Expected structure: {"canvas": {"access_token": ..., "host": (optional)},
# "lmsapi": ...}.  A missing file or key aborts the import.
with open('config.json') as json_data_file:
    configuration = parse_json(json_data_file)
canvas = configuration['canvas']
access_token= canvas["access_token"]
# Base URL of the Canvas courses REST API (host defaults to KTH's instance).
baseUrl = 'https://%s/api/v1/courses/' % canvas.get('host', 'kth.instructure.com')
# Authorization header attached to every Canvas request below.
header = {'Authorization' : 'Bearer ' + access_token}
# Base URL of the LMS API used by find_canvas_id() to map course codes
# to Canvas course ids.
lmsapiurl = configuration['lmsapi']
def main():
    """Upload dumped course wiki pages into Canvas.

    For each course code given on the command line: resolve the Canvas
    course id, read ``<dumpdir>/<code>/pages.json``, upload every file and
    image each page references, rewrite links/imgs to point at the uploaded
    Canvas copies, create the pages via the Canvas pages API, and finally
    upload a timestamped JSON record of everything that was done.
    """
    parser = optparse.OptionParser(
        usage="Usage: %prog [options] course_code")
    parser.add_option('-v', '--verbose',
                      dest="verbose",
                      default=False,
                      action="store_true",
                      help="Print lots of output to stdout"
                      )
    parser.add_option('--canvasid', dest='canvasid',
                      help="Canvas id for the course (or use lms api)"
                      )
    parser.add_option('--dir', dest='dumpdir', default='dump',
                      help="Directory to read dumps from"
                      )
    parser.add_option('--nop', dest='nop', default=False, action='store_true',
                      help="Only show canvas course for course round."
                      )
    options, args = parser.parse_args()
    # An explicit --canvasid can only apply to a single course code.
    if options.canvasid and len(args) != 1:
        parser.error("Exactly one course_code is required when giving canvas id")
    elif len(args) == 0:
        parser.error("At least one course_code is required")
    for course_code in args:
        course_id = options.canvasid or find_canvas_id(course_code)
        if not course_id:
            print("Canvas course id not given or found")
            exit(1)
        dumpdir = options.dumpdir
        if options.verbose:
            print("Upload to %s (canvas #%s) from %s" % (
                course_code, course_id, dumpdir))
        if options.nop:
            # Dry run: only report which Canvas course would be used.
            continue
        # Dump directories are keyed by the 6-character course code.
        course_code = course_code[:6]
        with open('%s/%s/pages.json' % (dumpdir, course_code)) as json:
            dumpdata = parse_json(json)
        # Cache of already-uploaded attachments: local URL -> Canvas URL,
        # so a file referenced by several pages is uploaded only once.
        uploaded_files = {}
        for data in dumpdata:
            if options.verbose:
                print("Should upload", data)
            # Use the Canvas API to insert the page
            # POST /api/v1/courses/:course_id/pages
            #   wiki_page[title]
            #   wiki_page[body]
            #   wiki_page[published]
            html = BeautifulSoup(open("%s/%s/pages/%s.html" % (dumpdir, course_code, data['slug'])), "html.parser")
            # Rewrite <a href> links that point at dumped files: upload the
            # file (once) and point the link at the Canvas copy.
            for link in html.findAll(href=True):
                linkdata = next(filter(lambda i: i['url'] == link['href'], data['links']), None)
                if linkdata and linkdata.get('category') == 'file':
                    canvas_url = uploaded_files.get(link['href'])
                    if not canvas_url:
                        canvas_url = create_file(course_id, '%s/%s/pages/%s' % (dumpdir, course_code, linkdata['url']),
                                                 basename(linkdata['url']))
                        print("Uploaded %s to %s for link" % (link['href'], canvas_url))
                        uploaded_files[link['href']] = canvas_url
                    else:
                        print("%s is allready at %s" % (link['href'], canvas_url))
                    link['href'] = canvas_url
                    linkdata['url'] = canvas_url
            # Same treatment for <img src> references.
            for img in html.findAll('img'):
                imgdata = next(filter(lambda i: i['url'] == img.get('src'), data['links']), {})
                if imgdata.get('category') == 'file':
                    canvas_url = uploaded_files.get(img['src'])
                    if not canvas_url:
                        canvas_url = create_file(course_id, '%s/%s/pages/%s' % (dumpdir, course_code, imgdata['url']),
                                                 basename(imgdata['url']))
                        print("Uploaded %s to %s for img" % (img['src'], canvas_url))
                        uploaded_files[img['src']] = canvas_url
                    else:
                        print("%s is allready at %s" % (img['src'], canvas_url))
                    img['src'] = canvas_url
                    imgdata['url'] = canvas_url
            # Convert TeX formula spans to Canvas equation-image tags.
            for tex in html.findAll('span', attrs={'role': 'formula', 'data-language': 'tex'}):
                img = html.new_tag('img')
                img['src'] = '/equation_images/' + urlquote(tex.text)
                img['alt'] = tex.text
                img['class'] = tex.get('class')
                tex.replace_with(img)
                if options.verbose:
                    print("Modified formula %s to: %s" % (tex, img))
            url = baseUrl + '%s/pages' % (course_id)
            print("Should post page to", url)
            # Pages are created unpublished so they can be reviewed first.
            payload={
                'wiki_page[title]': data['title'],
                'wiki_page[published]': False,
                'wiki_page[body]': str(html)
            }
            if options.verbose:
                print(payload)
            r = requests.post(url, headers = header, data=payload)
            if r.status_code == requests.codes.ok:
                page_response=r.json()
                if options.verbose:
                    print("result of post creating page: %s" % page_response)
                print("Uploaded page to %s" % page_response['html_url'])
                # Record where the page ended up, for the final report below.
                data['url'] = page_response['html_url']
            else:
                print("Failed to upload page %s: %s" % (data['title'], r))
        # Write a timestamped record of the import and upload it to Canvas too.
        dumpname = '%s/%s/zzz-import-%s-%s.json' % (
            dumpdir, course_code, course_code, datetime.now().strftime('%Y%m%d-%H%M%S'))
        with open(dumpname, 'w') as json:
            dump_json(dumpdata, json, indent=4)
        result = create_file(course_id, dumpname, basename(dumpname))
        print('Uploaded final result to %s' % result)
def create_file(course_id, full_folder_name, file_name, verbose=False):
    """Upload a local file to a Canvas course and return its Canvas URL.

    Implements Canvas's three-phase upload protocol:
      1. POST to /courses/<id>/files announcing the name and size,
      2. POST the file contents to the returned ``upload_url``,
      3. GET the redirect Location (with our auth header) to confirm.

    Args:
        course_id: Canvas course id the file is uploaded to.
        full_folder_name: local path of the file.  Dump paths may be
            URL-encoded, so if the path does not exist, '+'->'%20'
            replacement and then full unquoting are tried.
        file_name: name the file should get in Canvas.
        verbose: print progress details when True.

    Returns:
        The uploaded file's Canvas URL with any query string stripped.

    Exits the process (exit code 1) on any phase failure.
    """
    url = baseUrl + '%s/files' % (course_id)
    try:
        statinfo = stat(full_folder_name)
    except OSError:
        # Was a bare `except:`; only a failed stat() should trigger the
        # fallback decodings.  A third failure propagates to the caller.
        try:
            full_folder_name = full_folder_name.replace('+', '%20')
            statinfo = stat(full_folder_name)
        except OSError:
            from urllib.parse import unquote
            full_folder_name = unquote(full_folder_name)
            statinfo = stat(full_folder_name)
    payload = {
        'name' : file_name,
        'size' : statinfo.st_size,
    }
    if verbose:
        print("Upload %s as %s" % (full_folder_name, payload))
    # note that the following must use a "post" and not a "put" operation
    phase1_response = requests.post(url, headers=header, data=payload)
    if phase1_response.status_code != 200:
        print('Error in upload phase 1: %s\n%s' % (phase1_response, phase1_response.text))
        exit(1)
    phase1_response_data = phase1_response.json()
    if verbose:
        print("Phase 1 response: %s" % phase1_response_data)
    upload_url = phase1_response_data['upload_url']
    # The upload params must precede the file part, hence the OrderedDict.
    data = OrderedDict(phase1_response_data['upload_params'])
    # Keep the file handle open only for the duration of the upload
    # (previously it was never closed).
    with open(full_folder_name, 'rb') as upload_fh:
        data[phase1_response_data['file_param']] = upload_fh
        if verbose:
            print("Post to %s: %s" % (upload_url, data))
        # Don't follow the redirect automatically: phase 3 must resend our
        # Authorization header, which would be dropped on an auto-redirect.
        phase2_response = requests.post(upload_url, files=data, allow_redirects=False)
    if phase2_response.status_code >= 400:
        print('Error in upload phase 2: %s\n%s' % (phase2_response, phase2_response.text))
        exit(1)
    if verbose:
        print("Phase 2 should redirect: %s %s" % (phase2_response, phase2_response.headers))
    phase3_response = requests.get(phase2_response.headers.get('Location'), headers=header)
    # Bug fix: this used to re-check phase1_response, so phase-3 failures
    # went undetected; check phase 3 itself, and do so before parsing JSON.
    if phase3_response.status_code != 200:
        print('Error in upload phase 3: %s\n%s' % (phase3_response, phase3_response.text))
        exit(1)
    phase3_data = phase3_response.json()
    if verbose:
        print("Phase 3 response: %s, json: %s" % (phase3_response, phase3_data))
    # Strip the query string.  Bug fix: url[0:url.find('?')] chopped the
    # last character off URLs that contain no '?' (find() returns -1).
    return phase3_data['url'].split('?')[0]
def find_canvas_id(coursecode, forterm='HT17'):
    """Resolve a course code to a Canvas course id via the LMS API.

    Queries the LMS API with the 6-character course code.  If exactly one
    course is returned, its id is used; otherwise an exact sis_course_id
    match wins, falling back to a unique match on course code plus term.
    Returns None when no unambiguous course can be found.
    """
    lookup_url = '%s/courses/%s' % (lmsapiurl, coursecode[:6])
    print('Url: %s' % lookup_url)
    resp = requests.get(lookup_url)
    if resp.status_code != 200:
        print('Failed to get canvas data for %s: %s' % (coursecode[:6], resp))
        return None
    data = resp.json()
    # A single hit is unambiguous regardless of term.
    if len(data) == 1:
        return data[0]['id']
    found = {}
    for course in data:
        sis_id = course['sis_course_id']
        if sis_id == coursecode:
            # Exact sis id match: done.
            return course['id']
        if sis_id[:10] == coursecode + forterm:
            # Candidate for the requested term; collect and decide below.
            found[sis_id] = course['id']
    if len(found) == 1:
        print("This should be simple: %s" % found)
        return found.popitem()[1]
    print('Failed to get canvas data for %s; got: %s' % (coursecode, data))
    return None
if __name__ == '__main__': main()
| {
"content_hash": "d1179c83b72683fd04a51175701bc0b0",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 119,
"avg_line_length": 42.605633802816904,
"alnum_prop": 0.5457851239669421,
"repo_name": "KTH/canvas-import-social",
"id": "5dc2b8d7267add610939bd5a7151e8aff0e0e987",
"size": "9099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/import_course.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9099"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division
from desiutil.log import get_logger
import argparse
import numpy as np
from desispec.qa import __offline_qa_version__
def parse(options=None):
    """Build and run the argument parser for the sky-residual QA script.

    Args:
        options: list of argument strings to parse instead of sys.argv
            (useful for tests/wrappers); None parses the real command line.

    Returns:
        argparse.Namespace with the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Generate QA on Sky Subtraction residuals [v{:s}]".format(__offline_qa_version__))
    parser.add_argument('--expid', type=int, help='Generate exposure plot on given exposure')
    parser.add_argument('--channels', type=str, help='List of channels to include')
    parser.add_argument('--prod', default=False, action="store_true", help="Results for full production run")
    # Typo fix in user-visible help text: "Expore" -> "Explore".
    parser.add_argument('--gauss', default=False, action="store_true", help="Explore Gaussianity for full production run")
    parser.add_argument('--nights', type=str, help='List of nights to limit prod plots')
    parser.add_argument('--skyline', default=False, action="store_true", help="Skyline residuals?")
    # Help text fix: the previous text circularly said "Default is
    # qaprod_dir"; main() actually falls back to qaprod_root().
    parser.add_argument('--qaprod_dir', type=str, default=None, help='Path to where QA figure files are generated. Default is qaprod_root()')
    if options is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(options)
    return args
def main(args):
    """Generate QA figures for sky-subtraction residuals.

    Exactly one mode runs per invocation, checked in this order:
    --expid (residual series for a single exposure), --skyline
    (sky-line residuals per channel), --prod (full-production dual
    residual plots), --gauss (Gaussianity test of the sky noise).

    Args:
        args: argparse.Namespace produced by parse().
    """
    # imports (kept local so the heavy desispec stack loads only when run)
    import glob
    from desispec.io import findfile, makepath
    from desispec.io import get_exposures
    from desispec.io import get_files, get_nights
    from desispec.io import get_reduced_frames
    from desispec.io import specprod_root
    from desispec.io import qaprod_root
    from desispec.qa import utils as qa_utils
    import copy
    import pdb

    # Init
    specprod_dir = specprod_root()

    # Log
    log = get_logger()
    log.info("starting")

    # Output path for the generated QA figures
    if args.qaprod_dir is not None:
        qaprod_dir = args.qaprod_dir
    else:
        qaprod_dir = qaprod_root()

    # Channels
    if args.channels is not None:
        channels = args.channels.split(',')
    else:
        channels = ['b', 'r', 'z']

    # Per-channel accumulators of wavelength/flux/residual arrays
    sky_dict = dict(wave=[], skyflux=[], res=[], count=0)
    channel_dict = dict(b=copy.deepcopy(sky_dict),
                        r=copy.deepcopy(sky_dict),
                        z=copy.deepcopy(sky_dict),
                        )
    # Nights
    if args.nights is not None:
        nights = args.nights.split(',')
    else:
        nights = None

    # Exposure plot?
    if args.expid is not None:
        # Nights
        if nights is None:
            nights = get_nights()
        nights.sort()
        # Find the night containing the exposure, then plot its residuals.
        # (An unused findfile('sky', ...) lookup was removed here.)
        for night in nights:
            if args.expid in get_exposures(night, specprod_dir=specprod_dir):
                frames_dict = get_files(filetype=str('cframe'), night=night,
                                        expid=args.expid, specprod_dir=specprod_dir)
                # Loop on channel
                #for channel in ['b','r','z']:
                for channel in ['z']:
                    channel_dict[channel]['cameras'] = []
                    for camera, cframe_fil in frames_dict.items():
                        if channel in camera:
                            wave, flux, res, _ = qa_utils.get_skyres(cframe_fil)
                            # Append (flux clipped at 0.1 before taking log10)
                            channel_dict[channel]['wave'].append(wave)
                            channel_dict[channel]['skyflux'].append(
                                np.log10(np.maximum(flux, 1e-1)))
                            channel_dict[channel]['res'].append(res)
                            channel_dict[channel]['cameras'].append(camera)
                            channel_dict[channel]['count'] += 1
                    if channel_dict[channel]['count'] > 0:
                        from desispec.qa.qa_plots import skysub_resid_series  # Hidden to help with debugging
                        skysub_resid_series(channel_dict[channel], 'wave',
                                            outfile=qaprod_dir+'/QA_skyresid_wave_expid_{:d}{:s}.png'.format(args.expid, channel))
                        skysub_resid_series(channel_dict[channel], 'flux',
                                            outfile=qaprod_dir+'/QA_skyresid_flux_expid_{:d}{:s}.png'.format(args.expid, channel))
        return

    # Skyline
    if args.skyline:
        from desispec.qa.qa_plots import skyline_resid
        # Loop on channel
        for channel in channels:
            cframes = get_reduced_frames(nights=nights, channels=[channel])
            if len(cframes) > 0:
                log.info("Loading sky residuals for {:d} cframes".format(len(cframes)))
                if len(cframes) == 1:
                    log.error('len(cframes)==1; starting debugging')
                    pdb.set_trace()  # Need to call differently
                else:
                    sky_wave, sky_flux, sky_res, sky_ivar = qa_utils.get_skyres(
                        cframes, flatten=False)
                # Plot
                # Bug fix: this previously used args.outdir, an attribute
                # parse() never defines (guaranteed AttributeError); write
                # to the QA output directory like the other modes.
                outfile = qaprod_dir + '/skyline_{:s}.png'.format(channel)
                log.info("Plotting to {:s}".format(outfile))
                skyline_resid(channel, sky_wave, sky_flux, sky_res, sky_ivar,
                              outfile=outfile)
        return

    # Full Prod Plot?
    if args.prod:
        from desispec.qa.qa_plots import skysub_resid_dual
        # Loop on channel
        for channel in channels:
            cframes = get_reduced_frames(nights=nights, channels=[channel])
            if len(cframes) > 0:
                log.info("Loading sky residuals for {:d} cframes".format(len(cframes)))
                sky_wave, sky_flux, sky_res, _ = qa_utils.get_skyres(cframes)
                # Plot
                outfile = qaprod_dir+'/skyresid_prod_dual_{:s}.png'.format(channel)
                makepath(outfile)
                log.info("Plotting to {:s}".format(outfile))
                skysub_resid_dual(sky_wave, sky_flux, sky_res, outfile=outfile)
        return

    # Test sky noise for Gaussianity
    if args.gauss:
        from desispec.qa.qa_plots import skysub_gauss
        # Loop on channel
        for channel in channels:
            cframes = get_reduced_frames(nights=nights, channels=[channel])
            if len(cframes) > 0:
                # Cut down for debugging
                #cframes = [cframes[ii] for ii in range(15)]
                #
                log.info("Loading sky residuals for {:d} cframes".format(len(cframes)))
                sky_wave, sky_flux, sky_res, sky_ivar = qa_utils.get_skyres(cframes)
                # Plot
                log.info("Plotting..")
                outfile = qaprod_dir+'/skyresid_prod_gauss_{:s}.png'.format(channel)
                makepath(outfile)
                skysub_gauss(sky_wave, sky_flux, sky_res, sky_ivar,
                             outfile=outfile)
        return
| {
"content_hash": "67776ceb91b72b0337fab6caa25aae9f",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 139,
"avg_line_length": 42.93788819875776,
"alnum_prop": 0.5602488065962679,
"repo_name": "desihub/desispec",
"id": "a7d9fbd05b10cdb0faee2d54c0bbb238517f5d9e",
"size": "6963",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/desispec/scripts/skysubresid.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "761"
},
{
"name": "Python",
"bytes": "4219435"
},
{
"name": "Shell",
"bytes": "17927"
}
],
"symlink_target": ""
} |
from direct.showbase.DirectObject import DirectObject
from direct.directnotify.DirectNotifyGlobal import directNotify
class DistributedObjectBase(DirectObject):
    """
    The Distributed Object class is the base class for all network based
    (i.e. distributed) objects.  These will usually (always?) have a
    dclass entry in a *.dc file.
    """
    notify = directNotify.newCategory("DistributedObjectBase")

    def __init__(self, cr):
        # cr is the repository this object lives in; getParentObj() resolves
        # parentId through its doId2do table.
        assert self.notify.debugStateCall(self)
        self.cr = cr
        self.children = {}
        # Location is unknown until a setLocation-style call fills it in.
        self.parentId = None
        self.zoneId = None

    if __debug__:
        def status(self, indent=0):
            """
            print out "doId(parentId, zoneId) className"
            """
            spaces = ' ' * (indent + 2)
            try:
                print("%s%s:" % (' ' * indent, self.__class__.__name__))
                print("%sfrom DistributedObject doId:%s, parent:%s, zone:%s" % (
                    spaces, self.doId, self.parentId, self.zoneId))
            except Exception as e:
                print("%serror printing status %s" % (spaces, e))

    def getLocation(self):
        """Return (parentId, zoneId), or None when the location is unset
        or holds one of the 'no location' sentinel values."""
        try:
            if self.parentId == 0 and self.zoneId == 0:
                return None
            # This is a -1 stuffed into a uint32
            if self.parentId == 0xffffffff and self.zoneId == 0xffffffff:
                return None
            return (self.parentId, self.zoneId)
        except AttributeError:
            return None

    def handleChildArrive(self, childObj, zoneId):
        """
        A new child has just setLocation beneath us. Give us a
        chance to run code when a new child sets location to us. For
        example, we may want to scene graph reparent the child to
        some subnode we own.
        """
        assert self.notify.debugCall()
        # Inheritors should override
        pass

    def handleChildArriveZone(self, childObj, zoneId):
        """
        A child has just changed zones beneath us with setLocation.
        Give us a chance to run code when an existing child sets
        location to us. For example, we may want to scene graph
        reparent the child to some subnode we own.
        """
        assert self.notify.debugCall()
        # Inheritors should override
        pass

    def handleChildLeave(self, childObj, zoneId):
        """
        A child is about to setLocation away from us. Give us a
        chance to run code just before a child sets location away from us.
        """
        assert self.notify.debugCall()
        # Inheritors should override
        pass

    def handleChildLeaveZone(self, childObj, zoneId):
        """
        A child is about to setLocation to another zone beneath us.
        Give us a chance to run code just before a child sets
        location to that zone.
        """
        assert self.notify.debugCall()
        # Inheritors should override
        pass

    def handleQueryObjectChildrenLocalDone(self, context):
        assert self.notify.debugCall()
        # Inheritors should override
        pass

    def getParentObj(self):
        """Look up our parent object in the repository, or None."""
        if self.parentId is None:
            return None
        return self.cr.doId2do.get(self.parentId)

    def hasParentingRules(self):
        """Return True if our dclass defines a setParentingRules field.

        NOTE(review): self.dclass is not set by this base class; it is
        presumably provided by a subclass or the repository -- confirm.
        """
        # Idiom fix: compare against None with `is not`, not `!=`.
        return self.dclass.getFieldByName('setParentingRules') is not None
| {
"content_hash": "378d8fdc0075b083823ef1ff1efa29d7",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 80,
"avg_line_length": 35.09473684210526,
"alnum_prop": 0.6019796040791842,
"repo_name": "brakhane/panda3d",
"id": "e105e4c891fb7b373e0a419541b05151b17e1606",
"size": "3335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "direct/src/distributed/DistributedObjectBase.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4004"
},
{
"name": "C",
"bytes": "6395016"
},
{
"name": "C++",
"bytes": "31193551"
},
{
"name": "Emacs Lisp",
"bytes": "166274"
},
{
"name": "Groff",
"bytes": "3106"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "NSIS",
"bytes": "91955"
},
{
"name": "Nemerle",
"bytes": "4403"
},
{
"name": "Objective-C",
"bytes": "30065"
},
{
"name": "Objective-C++",
"bytes": "300394"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl6",
"bytes": "30636"
},
{
"name": "Puppet",
"bytes": "2627"
},
{
"name": "Python",
"bytes": "5530601"
},
{
"name": "Rebol",
"bytes": "421"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
} |
import alooma
import os
import sys
import pprint
# Connect to the Alooma API.  Credentials come from the environment so the
# script can run unattended; ALOOMA_HOST/PORT/USERNAME/PASSWORD must be set
# (a missing variable raises KeyError immediately).
api = alooma.Alooma(
    hostname=os.environ['ALOOMA_HOST'],
    port=int(os.environ['ALOOMA_PORT']),
    username=os.environ['ALOOMA_USERNAME'],
    password=os.environ['ALOOMA_PASSWORD'])
# Positional CLI arguments: the mapping to edit, then the target schema
# and table for consolidation.
mapping_name = sys.argv[1]
schema_name = sys.argv[2]
table_name = sys.argv[3]
pprint.pprint(
    'Changing mapping ' + mapping_name + ' to consolidate to schema ' + schema_name + ' and table ' + table_name
)
# Fetch the current mapping, retarget its consolidation and mapping
# destinations, and write it back.  The mapping table gets a '_log' suffix;
# the consolidated table keeps the plain name.
mp = api.get_mapping(mapping_name)
mp['consolidation']['consolidatedSchema'] = schema_name
mp['consolidation']['consolidatedTableName'] = table_name
mp['mapping']['schema'] = schema_name
mp['mapping']['tableName'] = table_name + '_log'
api.set_mapping(mp, mapping_name)
| {
"content_hash": "952edc8d09acd48d34741dbcc037481b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 112,
"avg_line_length": 30,
"alnum_prop": 0.7027777777777777,
"repo_name": "rocketmiles/alooma-scripts",
"id": "35011b093a8c266123d09d75125409ef538a88d7",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fix-mapping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1337"
},
{
"name": "Makefile",
"bytes": "7853"
},
{
"name": "Python",
"bytes": "2620"
}
],
"symlink_target": ""
} |
import logging
# Package metadata for dsmcache.
__version__ = (0, 0, 1)
__author__ = "Rafał Furmański"
__contact__ = "r.furmanski@gmail.com"
__homepage__ = "http://github.com/r4fek/dsmcache"
__docformat__ = "markdown"

# Default the package logger to INFO so library users get basic output
# without configuring logging themselves.
logging.getLogger('dsmcache').setLevel(logging.INFO)
| {
"content_hash": "050a664dce088e3834a540f4cbcce3cc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 52,
"avg_line_length": 24.1,
"alnum_prop": 0.6721991701244814,
"repo_name": "r4fek/dsmcache",
"id": "e348dfd4ef78e93fbc9593bc5987640c66ff8dbf",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dsmcache/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23722"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from types import FrameType
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from django.core.management import CommandError
from django.conf import settings
from django.utils import autoreload
from zerver.worker.queue_processors import get_worker, get_active_worker_queues
import sys
import signal
import logging
import threading
class Command(BaseCommand):
    """Django management command that runs Zulip queue-processing workers."""

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('--queue_name', metavar='<queue name>', type=str,
                            help="queue to process")
        parser.add_argument('--worker_num', metavar='<worker number>', type=int, nargs='?', default=0,
                            help="worker label")
        parser.add_argument('--all', dest="all", action="store_true", default=False,
                            help="run all queues")

    help = "Runs a queue processing worker"

    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        # Runs either every active queue in threads (--all, with autoreload)
        # or a single named queue worker in this process (--queue_name).
        logging.basicConfig()
        logger = logging.getLogger('process_queue')

        # Queue workers are pointless without RabbitMQ; exit either way,
        # but stay quiet (info, not error) under the test suite.
        if not settings.USING_RABBITMQ:
            # Make the warning silent when running the tests
            if settings.TEST_SUITE:
                logger.info("Not using RabbitMQ queue workers in the test suite.")
            else:
                logger.error("Cannot run a queue processor when USING_RABBITMQ is False!")
            sys.exit(1)

        def run_threaded_workers(logger):
            # type: (logging.Logger) -> None
            # Launch one Threaded_worker per active queue; invoked through
            # django's autoreload so code changes restart all workers.
            cnt = 0
            for queue_name in get_active_worker_queues():
                if not settings.DEVELOPMENT:
                    logger.info('launching queue worker thread ' + queue_name)
                cnt += 1
                td = Threaded_worker(queue_name)
                td.start()
            logger.info('%d queue worker threads were launched' % (cnt,))

        if options['all']:
            autoreload.main(run_threaded_workers, (logger,))
        else:
            queue_name = options['queue_name']
            worker_num = options['worker_num']
            logger.info("Worker %d connecting to queue %s" % (worker_num, queue_name))
            worker = get_worker(queue_name)
            worker.setup()

            def signal_handler(signal, frame):
                # type: (int, FrameType) -> None
                # Stop the worker cleanly on SIGTERM/SIGINT before exiting.
                logger.info("Worker %d disconnecting from queue %s" % (worker_num, queue_name))
                worker.stop()
                sys.exit(0)
            signal.signal(signal.SIGTERM, signal_handler)
            signal.signal(signal.SIGINT, signal_handler)
            worker.start()
class Threaded_worker(threading.Thread):
    """Thread that owns a single queue worker and consumes from it."""

    def __init__(self, queue_name):
        # type: (str) -> None
        super(Threaded_worker, self).__init__()
        self.worker = get_worker(queue_name)

    def run(self):
        # type: () -> None
        # Set up the worker inside the thread, then hand control to it.
        worker = self.worker
        worker.setup()
        logging.debug('starting consuming ' + worker.queue_name)
        worker.start()
| {
"content_hash": "f960c8d31e7c30ba5b7e7ee0904a16a2",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 102,
"avg_line_length": 37.80487804878049,
"alnum_prop": 0.5903225806451613,
"repo_name": "vikas-parashar/zulip",
"id": "48afef41260f6dc890821f844336274f2529cf2d",
"size": "3100",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/management/commands/process_queue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "241321"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "459360"
},
{
"name": "JavaScript",
"bytes": "1466602"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "82241"
},
{
"name": "Python",
"bytes": "2930542"
},
{
"name": "Ruby",
"bytes": "249748"
},
{
"name": "Shell",
"bytes": "35313"
}
],
"symlink_target": ""
} |
from etherscan.tokens import Tokens
import json
# Read the Etherscan API key from a local JSON file (kept outside the repo).
with open('../../api_key.json', mode='r') as key_file:
    key = json.loads(key_file.read())['key']

# tokenname options are:
#   DGD
#   MKR
#   TheDAO
address = '0x0a869d79a7052c7f1b55a8ebabbea3420f0d1e13'
api = Tokens(tokenname='TheDAO', api_key=key)
# Query Etherscan for the token balance held by `address` and print it.
balance = api.get_token_balance(address=address)
print(balance)
| {
"content_hash": "98b84156247e15d299829441bd6cd7ea",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 54,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.7032085561497327,
"repo_name": "valerius21/ERMA17",
"id": "1e0b6bd259cec51d6454225959f054ca4da18e77",
"size": "374",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py_ethio_api/build/lib/examples/tokens/get_token_balance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "571"
},
{
"name": "Jupyter Notebook",
"bytes": "19821"
},
{
"name": "Python",
"bytes": "36042"
}
],
"symlink_target": ""
} |
from odoo import http
# class SarinahBarcodeScanning(http.Controller):
# @http.route('/sarinah_barcode_scanning/sarinah_barcode_scanning/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/sarinah_barcode_scanning/sarinah_barcode_scanning/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('sarinah_barcode_scanning.listing', {
# 'root': '/sarinah_barcode_scanning/sarinah_barcode_scanning',
# 'objects': http.request.env['sarinah_barcode_scanning.sarinah_barcode_scanning'].search([]),
# })
# @http.route('/sarinah_barcode_scanning/sarinah_barcode_scanning/objects/<model("sarinah_barcode_scanning.sarinah_barcode_scanning"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('sarinah_barcode_scanning.object', {
# 'object': obj
# }) | {
"content_hash": "bdd095b788b8bdba773fa70edce273f2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 160,
"avg_line_length": 48.8421052631579,
"alnum_prop": 0.6530172413793104,
"repo_name": "budihartono/odoo_addons",
"id": "422ed4319e58b14d67303e56544f02100ba99993",
"size": "952",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sale_barcode_scan/controllers/controllers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10397"
}
],
"symlink_target": ""
} |
"""BosonOperator stores a sum of products of bosonic ladder operators."""
from openfermion.ops._symbolic_operator import SymbolicOperator
class BosonOperator(SymbolicOperator):
    r"""BosonOperator stores a sum of products of bosonic ladder operators.

    In OpenFermion, we describe bosonic ladder operators using the shorthand:
    'i^' = b^\dagger_i
    'j' = b_j
    where ['i', 'j^'] = delta_ij is the commutator.

    One can multiply together these bosonic ladder operators to obtain a
    bosonic term. For instance, '2^ 1' is a bosonic term which
    creates at mode 2 and destroys at mode 1. The BosonicOperator class
    also stores a coefficient for the term, e.g. '3.17 * 2^ 1'.

    The BosonOperator class is designed (in general) to store sums of these
    terms. For instance, an instance of BosonOperator might represent
    3.17 2^ 1 - 66.2 * 8^ 7 6^ 2

    The Bosonic Operator class overloads operations for manipulation of
    these objects by the user.

    BosonOperator is a subclass of SymbolicOperator. Importantly, it has
    attributes set as follows::

        actions = (1, 0)
        action_strings = ('^', '')
        action_before_index = False
        different_indices_commute = True

    See the documentation of SymbolicOperator for more details.

    Example:
        .. code-block:: python

            H = (BosonOperator('0^ 3', .5)
                 + .5 * BosonOperator('3^ 0'))
            # Equivalently
            H2 = BosonOperator('0^ 3', 0.5)
            H2 += BosonOperator('3^ 0', 0.5)

    Note:
        Adding BosonOperator is faster using += (as this
        is done by in-place addition). Specifying the coefficient
        during initialization is faster than multiplying a BosonOperator
        with a scalar.
    """

    @property
    def actions(self):
        """The allowed actions: 1 is raising ('^'), 0 is lowering."""
        return (1, 0)

    @property
    def action_strings(self):
        """The string representations of the allowed actions."""
        return ('^', '')

    @property
    def action_before_index(self):
        """Whether action comes before index in string representations."""
        return False

    @property
    def different_indices_commute(self):
        """Whether factors acting on different indices commute."""
        return True

    def is_normal_ordered(self):
        """Return whether or not term is in normal order.

        In our convention, ladder operators come first.
        Note that unlike the Fermion operator, due to the commutation
        of ladder operators with different indices, the BosonOperator
        sorts ladder operators by index.
        """
        # A violation can only occur between adjacent factors: a lowering
        # operator immediately followed by a raising operator on the same
        # mode.  Checking each adjacent pair once is equivalent to (and
        # replaces) the previous O(n^2) scan, which re-examined every
        # earlier pair at each position.
        for term in self.terms:
            for left_operator, right_operator in zip(term, term[1:]):
                if (right_operator[0] == left_operator[0] and
                        right_operator[1] > left_operator[1]):
                    return False
        return True

    def is_boson_preserving(self):
        """Query whether the term preserves particle number.

        This is equivalent to requiring the same number of
        raising and lowering operators in each term.
        """
        for term in self.terms:
            # Net change per factor: (-1)**action is -1 for a raising
            # operator (action 1) and +1 for a lowering operator (action 0);
            # a particle-conserving term sums to zero.  (The previous
            # comment stated the signs the wrong way round.)
            particles = 0
            for operator in term:
                particles += (-1) ** operator[1]
            if particles != 0:
                return False
        return True
| {
"content_hash": "568947bee338587bceec033818c729cc",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 77,
"avg_line_length": 35.029411764705884,
"alnum_prop": 0.609291911558914,
"repo_name": "jarrodmcc/OpenFermion",
"id": "651a32475fc286ab27e900d76fbb198ba2f62bd3",
"size": "4136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/openfermion/ops/_boson_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1370322"
},
{
"name": "Shell",
"bytes": "10029"
}
],
"symlink_target": ""
} |
"""
PyPDF2 is a free and open-source pure-python PDF library capable of splitting,
merging, cropping, and transforming the pages of PDF files. It can also add
custom data, viewing options, and passwords to PDF files. PyPDF2 can retrieve
text and metadata from PDFs as well.
You can read the full docs at https://pypdf2.readthedocs.io/.
"""
from ._encryption import PasswordType
from ._merger import PdfFileMerger, PdfMerger
from ._page import PageObject, Transformation
from ._reader import DocumentInformation, PdfFileReader, PdfReader
from ._version import __version__
from ._writer import PdfFileWriter, PdfWriter
from .pagerange import PageRange, parse_filename_page_ranges
from .papersizes import PaperSize
# Explicit public API of the package: star-imports and documentation
# tooling rely on this list.
__all__ = [
    "__version__",
    "PageRange",
    "PaperSize",
    "DocumentInformation",
    "parse_filename_page_ranges",
    "PdfFileMerger",  # will be removed in PyPDF2 3.0.0; use PdfMerger instead
    "PdfFileReader",  # will be removed in PyPDF2 3.0.0; use PdfReader instead
    "PdfFileWriter",  # will be removed in PyPDF2 3.0.0; use PdfWriter instead
    "PdfMerger",
    "PdfReader",
    "PdfWriter",
    "Transformation",
    "PageObject",
    "PasswordType",
]
| {
"content_hash": "1635e6cea45c26dc84e97d16e3cc814f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 35.029411764705884,
"alnum_prop": 0.7313182199832073,
"repo_name": "xilopaint/alfred-pdf-tools",
"id": "08b03d5ec0b0e6298903d8a2fc90af384330b87f",
"size": "1191",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/PyPDF2/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1258018"
},
{
"name": "Shell",
"bytes": "5592"
}
],
"symlink_target": ""
} |
import abc
from pandas.tseries.holiday import *
# Month-number aliases so the Holiday definitions below read naturally.
Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec = range(1, 13)

# Fixed-date holidays shift to the following Monday when on a weekend.
NewYearsDay = Holiday('New Years Day', month=Jan, day=1,
                      observance=weekend_to_monday)
# NOTE(review): the two May entries appear to have swapped labels.
# 'Spring Bank Holiday' below resolves to the FIRST Monday of May (the
# Early May / May Day bank holiday), while 'May Day' resolves to the LAST
# Monday of May (the actual Spring bank holiday).  The dates look correct
# as a pair; confirm which names downstream consumers rely on before
# renaming anything.
SpringBankHoliday = Holiday('Spring Bank Holiday', month=May, day=1,
                            offset=DateOffset(weekday=MO(1)))
MayDay = Holiday('May Day', month=May, day=25, offset=DateOffset(weekday=MO(1)))
# First Monday on/after Aug 25, i.e. the last Monday of August.
SummerBankHoliday = Holiday('August Bank', month=Aug, day=25,
                            offset=DateOffset(weekday=MO(1)))
ChristmasDay = Holiday('Christmas Day', month=Dec, day=25,
                       observance=weekend_to_monday)
# next_monday_or_tuesday keeps Boxing Day clear of the shifted Christmas Day.
BoxingDay = Holiday('Boxing Day', month=Dec, day=26,
                    observance=next_monday_or_tuesday)
class TZAbstractHolidayCalendar(AbstractHolidayCalendar):
    """Container to hold extension of holidays from AbstractHolidayCalendar so
    that it returns a tz-aware DatetimeIndex
    """

    # NOTE(review): this class does not use ABCMeta, so abc.abstractproperty
    # is not enforced -- a subclass that forgets to define `tz` will still
    # instantiate and only fail later inside holidays(). Confirm whether
    # enforcement matters here.
    @abc.abstractproperty
    def tz(self):
        """The timezone that will be used to make the return holidays tz-aware
        """
        pass

    def holidays(self, *args, **kwargs):
        """Extends holidays method of AbstractHolidayCalendar by localizing the
        returned DatetimeIndex

        Parameters
        ----------
        Same as AbstractHolidayCalendar

        Returns
        -------
        DatetimeIndex
            tz-aware DatetimeIndex
        """
        holidays = super(TZAbstractHolidayCalendar, self).holidays(*args, **kwargs)
        # Drop any duplicate dates before localizing.
        holidays = holidays.drop_duplicates()
        tz_holidays = holidays.tz_localize(self.tz)
        return tz_holidays
class UKHolidayCalendar(TZAbstractHolidayCalendar):
    """Standard UK Holidays"""
    # GoodFriday and EasterMonday come from pandas.tseries.holiday (star
    # import above); the remaining rules are module-level definitions.
    rules = [NewYearsDay, GoodFriday, EasterMonday, SpringBankHoliday,
             MayDay, SummerBankHoliday, ChristmasDay, BoxingDay]
    # Returned holiday dates are localized to London time.
    tz = 'Europe/London'
| {
"content_hash": "98627b25d002678fabb31de8a583d95f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 83,
"avg_line_length": 36.22222222222222,
"alnum_prop": 0.647239263803681,
"repo_name": "paze-me/mambu",
"id": "83c536648b6f9eacc79a9b47581abe580381ded9",
"size": "1956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mambu/tools/calendars.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "59018"
}
],
"symlink_target": ""
} |
"""Class to generate a cosmic population of FRBs."""
import numpy as np
from frbpoppy.misc import pprint
from frbpoppy.number_density import NumberDensity
from frbpoppy.population import Population
import frbpoppy.direction_dists as did
import frbpoppy.dm_dists as dmd
import frbpoppy.time_dists as td
import frbpoppy.w_dists as wd
import frbpoppy.si_dists as sid
import frbpoppy.lum_dists as ld
import frbpoppy.galacticops as go
import frbpoppy.precalc as pc
class CosmicPopulation(Population):
"""Generate a cosmic FRB population."""
    def __init__(self,
                 n_srcs=1e4,
                 n_days=1,
                 name='cosmic',
                 repeaters=False,
                 generate=False):
        """Generate a population of FRBs.
        Args:
            n_srcs (int): Number of FRB sources to generate.
            n_days (float): Number of days over which FRBs are to be generated.
            name (str): Population name.
            repeaters (bool): Whether to generate a repeater population.
            generate (bool): Whether to create a population.
        Returns:
            Population: Population of FRBs
        """
        # Set up general population arguments
        Population.__init__(self)
        # Give population a name
        # NOTE(review): any falsy name ('' / None) silently falls back to
        # 'cosmic'; the default parameter already covers the common case.
        self.name = 'cosmic'
        if name:
            self.name = name
        # Set population arguments
        self.n_srcs = int(n_srcs)
        self.n_days = n_days
        self.repeaters = repeaters
        # 1-D shape now; gen_time() replaces it with (n_srcs, n_bursts)
        # for repeater populations.
        self.shape = (self.n_srcs,)
        # If wanting repeaters
        if self.repeaters:
            self.set_time()
        # Set up default models. The set_* methods only bind lambdas;
        # nothing is drawn until generate() runs the gen_* counterparts.
        self.set_emission_range()
        self.set_dist()
        self.set_direction()
        self.set_lum()
        self.set_si()
        self.set_w()
        self.set_dm_mw()
        self.set_dm_igm()
        self.set_dm_host()
        self.set_dm()
        # Whether to start generating a Cosmic Population
        if generate:
            self.generate()
    def gen_index(self):
        """Generate an index for each FRB source."""
        self.frbs.index = np.arange(self.n_srcs)
    def set_emission_range(self, low=100e6, high=10e9):
        """Set the emission range [Hz].
        The frequency range between which FRB sources should emit the given
        bolometric luminosity.
        Args:
            low (float): Lowest source frequency [Hz].
            high (float): Highest source frequency [Hz].
        """
        self.f_min = low
        self.f_max = high
    def gen_precalc(self):
        """Check whether pre-calculations have been run.
        Constructing the DistanceTable is done purely for its side effect
        (building/validating the cached table); the instance is discarded.
        """
        pc.DistanceTable(H_0=self.H_0, W_m=self.W_m, W_v=self.W_v)
    def set_dist(self, model='vol_co', **kwargs):
        """Set the number density model for calculating distances.
        Args:
            model (str): Number density model to use. Choice from
                ('vol_co', 'sfr', 'smd'). May also be a callable, in which
                case it is invoked with **kwargs at generation time.
            z_max (float): Maximum redshift.
            H_0 (float): Hubble constant.
            W_m (float): Density parameter Ω_m.
            W_v (float): Cosmological constant Ω_Λ.
            alpha (float): Desired log N log S slope for a perfect,
                non-cosmological population.
        """
        # Option to use your own model
        if not isinstance(model, str):
            self.dist_func = lambda: model(**kwargs)
            return
        # I sometimes use 'constant' instead of 'vol_co'
        if model == 'constant':
            model = 'vol_co'
        # Check whether recognised number density model
        if model not in ['vol_co', 'sfr', 'smd']:
            raise ValueError('set_dist input not recognised')
        # Set number density model
        # Don't fear the lambda, merely delays executing the function
        self.dist_func = lambda: NumberDensity(model=model, **kwargs)
    def gen_dist(self):
        """Generate source distances (redshift and comoving distance)."""
        n_model = self.dist_func()
        self.vol_co_max = n_model.vol_co_max
        self.frbs.z, self.frbs.dist_co = n_model.draw(self.n_srcs)
    def gen_gal_coords(self):
        """Generate galactic coordinates.
        Requires gl/gb (gen_direction) and z/dist_co (gen_dist) to have
        been generated first -- generate() runs them in that order.
        """
        frbs = self.frbs
        # Get the proper distance
        dist_pr = frbs.dist_co/(1+frbs.z)
        # Convert into galactic coordinates
        frbs.gx, frbs.gy, frbs.gz = go.lb_to_xyz(frbs.gl, frbs.gb, dist_pr)
    def set_direction(self, model='uniform', **kwargs):
        """Set the model for generating the directions of the frb sources.
        Args:
            model (str): Choice from ('uniform'), or a callable invoked
                with **kwargs at generation time.
            if model == 'uniform':
                min_ra (float): Minimum right ascension [frac deg].
                max_ra (float): Maximum right ascension [frac deg].
                min_dec (float): Minimum declination [frac deg].
                max_dec (float): Maximum declination [frac deg].
        """
        # Use your own function
        if not isinstance(model, str):
            self.direction_func = lambda: model(**kwargs)
            return
        # Or use a uniform distribution
        if model == 'uniform':
            self.direction_func = lambda: did.uniform(n_srcs=self.n_srcs,
                                                      **kwargs)
        else:
            raise ValueError('set_direction input not recognised')
    def gen_direction(self):
        """Generate the direction of frbs."""
        frbs = self.frbs
        # Calculate right ascension and declination
        frbs.ra, frbs.dec = self.direction_func()
        # Convert to galactic lat/long coordinates
        frbs.gl, frbs.gb = go.radec_to_lb(frbs.ra, frbs.dec, frac=True)
    def set_dm_mw(self, model='ne2001', **kwargs):
        """Set the model for the Milky Way dispersion measure.
        Args:
            model (str): Option of 'ne2001', or a callable invoked with
                **kwargs at generation time.
        """
        if not isinstance(model, str):
            self.dm_mw_func = lambda: model(**kwargs)
            return
        # Distribution from which to draw dm_mw
        if model == 'ne2001':
            self.dm_mw_func = lambda: pc.NE2001Table().lookup(self.frbs.gl,
                                                              self.frbs.gb)
        else:
            raise ValueError('set_dm_mw input not recognised')
    def gen_dm_mw(self):
        """Generate Milky Way dispersion measure."""
        self.frbs.dm_mw = self.dm_mw_func()
    def set_dm_igm(self, model='ioka', **kwargs):
        """Set intergalactic dispersion measure model.
        Args:
            model (str): Option of 'ioka', or a callable invoked with
                **kwargs at generation time.
            if model == 'ioka':
                slope (float): Slope of the DM-z relationship.
                std (float): Spread around the DM-z relationship.
                spread_dist (str): 'normal' or 'lognormal'.
        """
        # Possibility to use your own function
        if not isinstance(model, str):
            self.dm_igm_func = lambda: model(**kwargs)
            return
        # Distribution from which to draw intergalactic dm
        if model == 'ioka':
            self.dm_igm_func = lambda: dmd.ioka(z=self.frbs.z, **kwargs)
        else:
            raise ValueError('set_dm_igm input not recognised')
    def gen_dm_igm(self):
        """Generate intergalactic dispersion measure."""
        self.frbs.dm_igm = self.dm_igm_func()
    def set_dm_host(self, model='gauss', **kwargs):
        """Set host galaxy dispersion measure.
        Args:
            model (str): Options from ('gauss', 'lognormal', 'constant'),
                or a callable invoked with **kwargs at generation time.
            if model in ('gauss', 'lognormal'):
                mean (float): Mean DM [pc/cm^3].
                std (float): Standard deviation DM [pc/cm^3].
            if model == 'constant':
                value (float): Value to adopt [pc/cm^3].
        """
        if not isinstance(model, str):
            self.dm_host_func = lambda: model(**kwargs)
            return
        # Distribution from which to draw host dispersion measure
        # (startswith allows variants such as 'gaussian')
        if model.startswith('gauss'):
            self.dm_host_func = lambda: dmd.gauss(z=self.frbs.z,
                                                  n_srcs=self.n_srcs,
                                                  **kwargs)
        elif model == 'lognormal':
            self.dm_host_func = lambda: dmd.lognormal(z=self.frbs.z,
                                                      n_srcs=self.n_srcs,
                                                      **kwargs)
        elif model == 'constant':
            self.dm_host_func = lambda: dmd.constant(n_srcs=self.n_srcs,
                                                     **kwargs)
        else:
            raise ValueError('set_dm_host input not recognised')
    def gen_dm_host(self):
        """Generate host dispersion measure."""
        self.frbs.dm_host = self.dm_host_func()
    def set_dm(self, mw=True, igm=True, host=True):
        """Set total dispersion measure.
        Args:
            mw (bool): Whether to include a Milky Way component
            igm (bool): Whether to include an IGM component
            host (bool): Whether to include a host galaxy component
        """
        # Which components to include
        self.dm_components = []
        if mw:
            self.dm_components.append(self.gen_dm_mw)
        if igm:
            self.dm_components.append(self.gen_dm_igm)
        if host:
            self.dm_components.append(self.gen_dm_host)
        # Save those components to execute at a later stage
        def run_dm():
            # Run the selected generators, then sum the components.
            # NOTE(review): the sum always reads all three frbs.dm_*
            # attributes, even for excluded components -- this relies on
            # their defaults in the frbs object; confirm they default to 0.
            [c() for c in self.dm_components]
            return self.frbs.dm_mw + self.frbs.dm_igm + self.frbs.dm_host
        self.dm_func = run_dm
    def gen_dm(self):
        """Generate total dispersion measure."""
        self.frbs.dm = self.dm_func()
    def set_w(self, model='uniform', per_source='same', **kwargs):
        """Set intrinsic pulse widths model [ms].
        Args:
            model (str): Options from ('uniform', 'lognormal', 'constant')
            per_source (str): Model for a single source burst
                distribution. Options from 'same' or 'different'
            If model == 'constant':
                value (float): Pulse width [ms].
            If model == 'uniform':
                low (float): Minimum pulse width [ms].
                high (float): Maximum pulse width [ms].
            If model == 'lognormal':
                mean (float): Mean pulse width [ms].
                std (float): Standard deviation pulse width [ms].
        """
        # Each burst from the same source: same or different widths?
        if per_source == 'same':
            self.w_shape = lambda: self.n_srcs
        elif per_source == 'different':
            self.w_shape = lambda: self.shape
        # Distribution from which to draw pulse widths
        # Find available distributions to draw from
        # NOTE(review): any callable in wd other than calc_w_arr is
        # accepted as a model name -- confirm wd exposes only distributions.
        funcs = [d for d in dir(wd) if hasattr(getattr(wd, d), '__call__')]
        funcs.remove('calc_w_arr')
        # Set function
        if model in funcs:
            func = getattr(wd, model)
            # If you're getting fancy with combined distributions
            # See examples/adapting_population_parameters.py
            self._transpose_w = False
            for kw_value in kwargs.values():
                if isinstance(kw_value, (list, np.ndarray)):
                    self.w_shape = lambda: self.shape[::-1]
                    self._transpose_w = True
            self.w_func = lambda x: func(shape=x, z=self.frbs.z, **kwargs)
        else:
            raise ValueError('set_w input model not recognised')
    def gen_w(self):
        """Generate pulse widths [ms]."""
        shape = self.w_shape()
        self.frbs.w_int, self.frbs.w_arr = self.w_func(shape)
        # From combined distribution inputs
        if self._transpose_w:
            self.frbs.w_int = self.frbs.w_int.T
            self.frbs.w_arr = self.frbs.w_arr.T
    def set_si(self, model='gauss', per_source='same', **kwargs):
        """Set spectral index model.
        Args:
            model (str): Options from ('gauss', 'constant')
            per_source (str): Model for a single source burst
                distribution. Options from ('same', 'different')
            If model == 'constant':
                value (float): Default spectral index.
            If model == 'gauss':
                mean (float): Mean spectral index
                std (float): Standard deviation spectral index
        """
        # Each burst from the same source: same or different si?
        if per_source == 'same':
            self.si_shape = lambda: self.n_srcs
        elif per_source == 'different':
            self.si_shape = lambda: self.shape
        # Find available distributions to draw from
        funcs = [d for d in dir(sid) if hasattr(getattr(sid, d), '__call__')]
        # Set function
        if model in funcs:
            func = getattr(sid, model)
            # If you're getting fancy with combined distributions
            self._transpose_si = False
            for kw_value in kwargs.values():
                if isinstance(kw_value, (list, np.ndarray)):
                    self.si_shape = lambda: self.shape[::-1]
                    self._transpose_si = True
            # Distribution from which to draw spectral indices
            self.si_func = lambda x: func(shape=x, **kwargs)
        else:
            raise ValueError('set_si input not recognised')
    def gen_si(self):
        """Generate spectral indices."""
        shape = self.si_shape()
        self.frbs.si = self.si_func(shape)
        # Undo the reversed draw shape used for combined distributions
        if self._transpose_si:
            self.frbs.si = self.frbs.si.T
    def set_lum(self, model='powerlaw', per_source='same', **kwargs):
        """Set luminosity function [ergs/s].
        Args:
            model (str): Options from ('powerlaw', 'constant')
            per_source (str): Model for a single source burst
                distribution. Options from ('same', 'different')
            If model == 'powerlaw':
                low (float): Minimum bolometric luminosity [ergs/s]
                high (float): Maximum bolometric luminosity [ergs/s]
                power (float): Power of luminosity function
            If model == 'constant':
                value (float): Value for standard candle [ergs/s]
        """
        # Each burst from the same source: same or different luminosities?
        if per_source == 'same':
            self.lum_shape = lambda: self.n_srcs
        elif per_source == 'different':
            self.lum_shape = lambda: self.shape
        # Find available distributions to draw from
        funcs = [d for d in dir(ld) if hasattr(getattr(ld, d), '__call__')]
        # Set function
        if model in funcs:
            func = getattr(ld, model)
            # Help out the user: accept 'slope'/'index' as aliases of 'power'
            for s in ['slope', 'index']:
                if s in kwargs:
                    kwargs['power'] = kwargs.pop(s)
            # If you're getting fancy with combined distributions
            self._transpose_lum = False
            for kw_value in kwargs.values():
                if isinstance(kw_value, (list, np.ndarray)):
                    self.lum_shape = lambda: self.shape[::-1]
                    self._transpose_lum = True
            # Distribution from which to draw luminosities
            self.lum_func = lambda x: func(shape=x, **kwargs)
        else:
            raise ValueError('set_lum input not recognised')
    def gen_lum(self):
        """Generate luminosities [ergs/s]."""
        shape = self.lum_shape()
        self.frbs.lum_bol = self.lum_func(shape)
        # You need multiple luminosities if repeaters: broadcast the 1-D
        # draw across the burst dimension.
        if self.repeaters and self.frbs.lum_bol.ndim == 1:
            repeat_lums = [self.frbs.lum_bol[..., np.newaxis]]*self.shape[1]
            self.frbs.lum_bol = np.concatenate(repeat_lums, axis=1)
        if self._transpose_lum:
            self.frbs.lum_bol = self.frbs.lum_bol.T
    def set_time(self, model='regular', **kwargs):
        """Set model from which to generate time stamps.
        Args:
            model (str): Options from ('single', 'regular', 'clustered',
                'poisson', 'cyclic'), or a callable invoked with **kwargs
                at generation time.
            If model == 'regular':
                rate (float): Number of bursts per day
            If model == 'poisson':
                rate (float): Expected number of bursts per day
            If model == 'clustered':
                r (float): Rate parameter
                k (float): Shape parameter
            If model == 'cyclic':
                rate (float): Number of bursts per day
                period (float): Period of activity cycle (days)
                frac (float): Fraction of activity cycle a source is active
        """
        if not isinstance(model, str):
            # These lambda functions look complex, but aren't.
            # They merely stop the function from running immediately
            self.time_func = lambda: model(**kwargs)
            return
        # Find available distributions
        funcs = [d for d in dir(td) if hasattr(getattr(td, d), '__call__')]
        # Internal helpers in td are not valid user-facing models.
        # NOTE(review): funcs.remove raises ValueError if a helper is ever
        # renamed in td -- this list must be kept in sync with that module.
        internal = ['gamma', 'iteratively_gen_times', '_weibull_dist',
                    '_poisson_dist']
        for f in internal:
            funcs.remove(f)
        # Set function
        if model in funcs:
            func = getattr(td, model)
            # Distribution from which to draw time stamps
            self.time_func = lambda: func(n_srcs=self.n_srcs,
                                          n_days=self.n_days,
                                          z=self.frbs.z,
                                          **kwargs)
        else:
            raise ValueError('set_time input not recognised')
    def gen_time(self):
        """Generate time stamps."""
        # Only relevant for repeaters
        if not self.repeaters:
            return
        pprint('Adding burst times')
        self.frbs.time = self.time_func()
        # Set size for all other parameters: later draws become 2-D
        # (n_srcs, n_bursts)
        self.shape = self.frbs.time.shape
        pprint('Finished adding burst times')
    def generate(self):
        """Generate a full CosmicPopulation.
        Order matters: distances before times (time models need z),
        directions before galactic coordinates, and DM after both.
        """
        pprint(f'Generating {self.name} population')
        self.gen_index()
        self.gen_dist()
        self.gen_time()
        self.gen_direction()
        self.gen_gal_coords()
        self.gen_dm()
        self.gen_w()
        self.gen_lum()
        self.gen_si()
        pprint(f'Finished generating {self.name} population')
    @classmethod
    def simple(cls, n_srcs, n_days=1, repeaters=False, generate=False):
        """Set up a simple, local population.
        Local (z_max=0.01) standard candles with no dispersion measure,
        constant width and flat spectrum -- useful for sanity checks.
        """
        pop = cls(n_srcs=n_srcs, n_days=n_days, name='simple',
                  repeaters=repeaters, generate=False)
        pop.set_dist(model='vol_co', z_max=0.01, alpha=-1.5,
                     H_0=67.74, W_m=0.3089, W_v=0.6911)
        pop.set_dm(mw=False, igm=False, host=False)
        # NOTE(review): 10e7 == 1e8 Hz; presumably 100 MHz was intended
        # (matches set_emission_range's 100e6 default) -- confirm.
        pop.set_emission_range(low=10e7, high=10e9)
        pop.set_lum(model='constant', value=1e38)
        pop.set_w(model='constant', value=10)
        pop.set_si(model='constant', value=0)
        if pop.repeaters:
            pop.set_time(model='regular', rate=2)
        if generate:
            pop.generate()
        return pop
    @classmethod
    def complex(cls, n_srcs, n_days=1, repeaters=False, generate=False):
        """Set up a complex population.
        Full DM budget (MW + IGM + host) with distributed luminosities,
        widths and spectral indices out to z_max=1.
        """
        pop = cls(n_srcs=n_srcs, n_days=n_days, name='complex',
                  repeaters=repeaters, generate=False)
        pop.set_dist(model='vol_co', z_max=1, alpha=-1.5,
                     H_0=67.74, W_m=0.3089, W_v=0.6911)
        pop.set_dm_host(model='gauss', mean=100, std=200)
        pop.set_dm_igm(model='ioka', slope=1000, std=None)
        pop.set_dm_mw(model='ne2001')
        pop.set_dm(mw=True, igm=True, host=True)
        pop.set_emission_range(low=10e7, high=10e9)
        pop.set_lum(model='powerlaw', low=1e40, high=1e45, power=0)
        pop.set_w(model='lognormal', mean=0.1, std=1)
        pop.set_si(model='gauss', mean=-1.4, std=1)
        if pop.repeaters:
            pop.set_time(model='poisson', rate=9)
        if generate:
            pop.generate()
        return pop
    @classmethod
    def optimal(cls, n_srcs, n_days=1, repeaters=False, generate=False):
        """Set up an optimal population.
        Presumably parameters from a best-fit to observations -- confirm
        the source of these values against the frbpoppy papers.
        """
        pop = cls(n_srcs=n_srcs, n_days=n_days, name='optimal',
                  repeaters=repeaters, generate=False)
        pop.set_dist(model='vol_co', z_max=2.5, alpha=-2.2,
                     H_0=67.74, W_m=0.3089, W_v=0.6911)
        pop.set_dm_host(model='constant', value=50)
        pop.set_dm_igm(model='ioka', slope=1000, std=None)
        pop.set_dm_mw(model='ne2001')
        pop.set_dm(mw=True, igm=True, host=True)
        pop.set_emission_range(low=10e7, high=10e9)
        pop.set_lum(model='powerlaw', low=1e40, high=1e45, power=-0.8)
        pop.set_w(model='lognormal', mean=6.3e-3, std=.6)
        pop.set_si(model='constant', value=-0.4)
        if pop.repeaters:
            pop.set_time(model='poisson', rate=9)
        if generate:
            pop.generate()
        return pop
if __name__ == '__main__':
    # Quick test whether everything seems to be working or not
    import os
    import matplotlib.pyplot as plt
    # Generate a default population and histogram every per-FRB array.
    pop = CosmicPopulation(1e4)
    pop.generate()
    frbs = pop.frbs
    for arg in frbs.__dict__:
        pprint(f'Plotting {arg}')
        values = getattr(frbs, arg)
        if values is not None:
            plt.hist(values, bins=50)
            plt.xlabel(arg)
            # Save plots relative to this source file's location.
            p = f'../tests/plots/{arg}.png'
            p = os.path.join(os.path.abspath(os.path.dirname(__file__)), p)
            plt.savefig(p)
            plt.clf()
| {
"content_hash": "d19d2c6e83325f4609e1ac1a4521ab09",
"timestamp": "",
"source": "github",
"line_count": 587,
"max_line_length": 79,
"avg_line_length": 36.52810902896082,
"alnum_prop": 0.5515810092342133,
"repo_name": "davidgardenier/frbpoppy",
"id": "8ac420e5243c258b16771deea12e817cff298109",
"size": "21445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frbpoppy/cosmic_pop.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "80481"
},
{
"name": "HTML",
"bytes": "3209"
},
{
"name": "Python",
"bytes": "178609"
},
{
"name": "Shell",
"bytes": "381"
}
],
"symlink_target": ""
} |
'''
# 1 Create a company
# 2 Create sub-departments and set a virtual id as OrganizationSupervisor
# (1) no fewer than 5 levels
# (2) the first three levels have only one department, and the last two levels have ten sibling departments each
# (3) the first three levels have two members in each department, and the last two levels have 20 members in each department
# 3 Delete everything
@author: Yuling.Ren
'''
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.iam2_operations as iam2_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.test_util as test_util
virtual_id_uuid_list = []
def test():
    """Stress-build a 5-level organization tree, then tear it down.
    Levels 1-3: one department each with 2 members; levels 4-5: ten
    sibling departments each with 20 members (ranges are 1-based and
    exclusive at the top, so range(1, 2) is a single iteration).
    """
    global virtual_id_uuid_list
    # 1 create Company
    company_uuid_01 = iam2_ops.create_iam2_organization('test_company_01', 'Company').uuid
    department_name="test_department"
    # 2 Create sub department and set virtual id as OrganizationSupervisor
    # NOTE(review): each 'attributes' assignment below reuses the loop
    # variable after its loop ends, so the supervisor is always the LAST
    # user created in that department -- confirm this is intentional.
    for i in range(1,2):
        department1_uuid = iam2_ops.create_iam2_organization(department_name + "_" + str(i), 'Department',parent_uuid=company_uuid_01).uuid
        for n1 in range(1,3):
            username = department_name + "_" + str(i) + "_user_" + str(n1)
            virtual_id_uuid = iam2_ops.create_iam2_virtual_id(username, 'password').uuid
            iam2_ops.add_iam2_virtual_ids_to_organization([virtual_id_uuid], department1_uuid)
            virtual_id_uuid_list.append(virtual_id_uuid)
        attributes = [{"name": "__OrganizationSupervisor__", "value": virtual_id_uuid}]
        iam2_ops.add_attributes_to_iam2_organization(department1_uuid, attributes)
        for j in range(1,2):
            department2_uuid = iam2_ops.create_iam2_organization(department_name+ "_" + str(i) + "_" + str(j), 'Department',parent_uuid=department1_uuid).uuid
            for n2 in range(1,3):
                username=department_name+ "_" + str(i) + "_" + str(j) + "_user_" + str(n2)
                virtual_id_uuid = iam2_ops.create_iam2_virtual_id(username, 'password').uuid
                iam2_ops.add_iam2_virtual_ids_to_organization([virtual_id_uuid], department2_uuid)
                virtual_id_uuid_list.append(virtual_id_uuid)
            attributes = [{"name": "__OrganizationSupervisor__", "value": virtual_id_uuid}]
            iam2_ops.add_attributes_to_iam2_organization(department2_uuid, attributes)
            for k in range(1,2):
                department3_uuid = iam2_ops.create_iam2_organization(department_name+ "_" + str(i) + "_" + str(j) + "_" +str(k), 'Department',parent_uuid=department2_uuid).uuid
                for n3 in range(1,3):
                    username=department_name+ "_" + str(i) + "_" + str(j) + "_" +str(k) + "_user_" + str(n3)
                    virtual_id_uuid = iam2_ops.create_iam2_virtual_id(username, 'password').uuid
                    iam2_ops.add_iam2_virtual_ids_to_organization([virtual_id_uuid], department3_uuid)
                    virtual_id_uuid_list.append(virtual_id_uuid)
                attributes = [{"name": "__OrganizationSupervisor__", "value": virtual_id_uuid}]
                iam2_ops.add_attributes_to_iam2_organization(department3_uuid, attributes)
                for l in range(1,11):
                    department4_uuid = iam2_ops.create_iam2_organization(department_name+ "_" + str(i) + "_" + str(j) + "_" +str(k) + "_" +str(l), 'Department',parent_uuid=department3_uuid).uuid
                    for n4 in range(1,21):
                        username=department_name+ "_" + str(i) + "_" + str(j) + "_" +str(k) + "_" +str(l) + "_user_" + str(n4)
                        virtual_id_uuid = iam2_ops.create_iam2_virtual_id(username, 'password').uuid
                        iam2_ops.add_iam2_virtual_ids_to_organization([virtual_id_uuid], department4_uuid)
                        virtual_id_uuid_list.append(virtual_id_uuid)
                    attributes = [{"name": "__OrganizationSupervisor__", "value": virtual_id_uuid}]
                    iam2_ops.add_attributes_to_iam2_organization(department4_uuid, attributes)
                    for m in range(1,11):
                        department5_uuid = iam2_ops.create_iam2_organization(department_name+ "_" + str(i) + "_" + str(j) + "_" +str(k) + "_" +str(l) + "_" +str(m), 'Department',parent_uuid=department4_uuid).uuid
                        for n5 in range(1,21):
                            username=department_name+ "_" + str(i) + "_" + str(j) + "_" +str(k) + "_" +str(l) + "_" +str(m) + "_user_" + str(n5)
                            virtual_id_uuid = iam2_ops.create_iam2_virtual_id(username, 'password').uuid
                            iam2_ops.add_iam2_virtual_ids_to_organization([virtual_id_uuid], department5_uuid)
                            virtual_id_uuid_list.append(virtual_id_uuid)
                        attributes = [{"name": "__OrganizationSupervisor__", "value": virtual_id_uuid}]
                        iam2_ops.add_attributes_to_iam2_organization(department5_uuid, attributes)
    # 3 delete
    iam2_ops.delete_iam2_organization(company_uuid_01)
    for virtual_id_uuid in virtual_id_uuid_list:
        iam2_ops.delete_iam2_virtual_id(virtual_id_uuid)
    virtual_id_uuid_list=[]
    iam2_ops.clean_iam2_enviroment()
    test_util.test_pass('success')
# Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup, called only if an exception happens in test().
    Bug fix: the original declared ``global company_uuid_01`` and read it
    unconditionally, but test() assigns company_uuid_01 as a *local* (it
    never declares it global), so the name does not exist at module scope
    and cleanup itself raised NameError, masking the real test failure.
    Guard the lookup via globals() instead.
    """
    company_uuid = globals().get('company_uuid_01')
    if company_uuid:
        iam2_ops.delete_iam2_organization(company_uuid)
    # virtual_id_uuid_list is a genuine module-level global that test()
    # keeps up to date, so it is safe to iterate directly.
    for virtual_id_uuid in virtual_id_uuid_list:
        iam2_ops.delete_iam2_virtual_id(virtual_id_uuid)
    iam2_ops.clean_iam2_enviroment()
"content_hash": "dc5a61b61011bace4d67caba1d910739",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 212,
"avg_line_length": 59.93617021276596,
"alnum_prop": 0.6119985800496983,
"repo_name": "zstackorg/zstack-woodpecker",
"id": "10e77b6938454fd2a9acee26f4f83836ed1557b1",
"size": "5634",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/simulator/iam2/test_iam2_stress_admin_login.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "46522"
},
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "2891030"
},
{
"name": "Shell",
"bytes": "54266"
}
],
"symlink_target": ""
} |
from oslo_log import log
import six
from keystone.auth.plugins import base
from keystone.auth.plugins import mapped
from keystone.common import dependency
from keystone.common import wsgi
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone.models import token_model
LOG = log.getLogger(__name__)
CONF = keystone.conf.CONF
@dependency.requires('federation_api', 'identity_api', 'token_provider_api')
class Token(base.AuthMethodHandler):
    """Auth plugin that authenticates using an existing token (rescoping)."""
    def _get_token_ref(self, auth_payload):
        """Validate the token id in *auth_payload* and wrap the result."""
        token_id = auth_payload['id']
        response = self.token_provider_api.validate_token(token_id)
        return token_model.KeystoneToken(token_id=token_id,
                                         token_data=response)
    def authenticate(self, request, auth_payload):
        """Authenticate a request that carries a previously issued token.
        Raises ValidationError if the payload has no 'id'; federated
        tokens are rescoped through the mapped plugin, all others through
        token_authenticate below.
        """
        if 'id' not in auth_payload:
            raise exception.ValidationError(attribute='id',
                                            target='token')
        token_ref = self._get_token_ref(auth_payload)
        if token_ref.is_federated_user and self.federation_api:
            response_data = mapped.handle_scoped_token(
                request, token_ref, self.federation_api, self.identity_api)
        else:
            response_data = token_authenticate(request,
                                               token_ref)
        # NOTE(notmorgan): The Token auth method is *very* special and sets the
        # previous values to the method_names. This is because it can be used
        # for re-scoping and we want to maintain the values. Most
        # AuthMethodHandlers do no such thing and this is not required.
        response_data.setdefault('method_names', []).extend(token_ref.methods)
        return base.AuthHandlerResponse(status=True, response_body=None,
                                        response_data=response_data)
def token_authenticate(request, token_ref):
    """Build auth response data from an existing (non-federated) token.
    Rejects OAuth- and trust-scoped tokens, and (by configuration)
    rescoping from already-scoped tokens; propagates the audit trail and
    user id of the original token. Raises Unauthorized if token bind
    validation fails.
    """
    response_data = {}
    try:
        # Do not allow tokens used for delegation to
        # create another token, or perform any changes of
        # state in Keystone. To do so is to invite elevation of
        # privilege attacks
        if token_ref.oauth_scoped:
            raise exception.ForbiddenAction(
                action=_(
                    'Using OAuth-scoped token to create another token. '
                    'Create a new OAuth-scoped token instead'))
        elif token_ref.trust_scoped:
            raise exception.ForbiddenAction(
                action=_(
                    'Using trust-scoped token to create another token. '
                    'Create a new trust-scoped token instead'))
        if not CONF.token.allow_rescope_scoped_token:
            # Do not allow conversion from scoped tokens.
            if token_ref.project_scoped or token_ref.domain_scoped:
                raise exception.ForbiddenAction(
                    action=_('rescope a scoped token'))
        # May raise AssertionError, translated to Unauthorized below.
        wsgi.validate_token_bind(request.context_dict, token_ref)
        # New tokens maintain the audit_id of the original token in the
        # chain (if possible) as the second element in the audit data
        # structure. Look for the last element in the audit data structure
        # which will be either the audit_id of the token (in the case of
        # a token that has not been rescoped) or the audit_chain id (in
        # the case of a token that has been rescoped).
        try:
            token_audit_id = token_ref.get('audit_ids', [])[-1]
        except IndexError:
            # NOTE(morganfainberg): In the case this is a token that was
            # issued prior to audit id existing, the chain is not tracked.
            token_audit_id = None
        response_data.setdefault('expires_at', token_ref.expires)
        response_data['audit_id'] = token_audit_id
        response_data.setdefault('user_id', token_ref.user_id)
        # TODO(morganfainberg: determine if token 'extras' can be removed
        # from the response_data
        response_data.setdefault('extras', {}).update(
            token_ref.get('extras', {}))
        return response_data
    except AssertionError as e:
        LOG.error(six.text_type(e))
        raise exception.Unauthorized(e)
| {
"content_hash": "1e442072af32d1814154a1ae2a25dba7",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 40.9126213592233,
"alnum_prop": 0.624110109159943,
"repo_name": "ilay09/keystone",
"id": "8c8be5233804bae745723c2aff9d619fd1638b50",
"size": "4800",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/auth/plugins/token.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "5000747"
},
{
"name": "Shell",
"bytes": "7522"
}
],
"symlink_target": ""
} |
import unittest
from conans.test.utils.tools import TestServer, TestClient
from conans.model.ref import ConanFileReference
import os
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.paths import CONANFILE, CONANFILE_TXT
from conans.util.files import load
generator = """
from conans.model import Generator
from conans.paths import BUILD_INFO
from conans import ConanFile, CMake
class MyCustomGenerator(Generator):
@property
def filename(self):
return "customfile.gen"
@property
def content(self):
return "My custom generator content"
class MyCustomGeneratorPackage(ConanFile):
name = "MyCustomGen"
version = "0.2"
"""
consumer = """
[requires]
Hello0/0.1@lasote/stable
MyCustomGen/0.2@lasote/stable
[generators]
MyCustomGenerator
"""
generator_multi = """
from conans.model import Generator
from conans.paths import BUILD_INFO
from conans import ConanFile, CMake
class MyCustomMultiGenerator(Generator):
@property
def filename(self):
return "customfile.gen"
@property
def content(self):
return {"file1.gen": "CustomContent1",
"file2.gen": "CustomContent2"}
class NoMatterTheName(ConanFile):
name = "MyCustomGen"
version = "0.2"
"""
consumer_multi = """
[requires]
MyCustomGen/0.2@lasote/stable
[generators]
MyCustomMultiGenerator
"""
class CustomGeneratorTest(unittest.TestCase):
    """Integration tests for custom (user-defined) conan generators."""
    def setUp(self):
        # Fresh in-memory server per test; clients authenticate as lasote.
        test_server = TestServer()
        self.servers = {"default": test_server}
    def reuse_test(self):
        """A custom generator exported/uploaded once can be consumed both
        from the local cache and after retrieval from the remote."""
        conan_reference = ConanFileReference.loads("Hello0/0.1@lasote/stable")
        files = cpp_hello_conan_files("Hello0", "0.1", build=False)
        client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        client.save(files)
        client.run("export lasote/stable")
        client.run("upload %s" % str(conan_reference))
        gen_reference = ConanFileReference.loads("MyCustomGen/0.2@lasote/stable")
        files = {CONANFILE: generator}
        client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        client.save(files)
        client.run("export lasote/stable")
        client.run("upload %s" % str(gen_reference))
        # Test local, no retrieval
        files = {CONANFILE_TXT: consumer}
        client.save(files, clean_first=True)
        client.run("install --build")
        generated = load(os.path.join(client.current_folder, "customfile.gen"))
        self.assertEqual(generated, "My custom generator content")
        # Test retrieval from remote
        client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        files = {CONANFILE_TXT: consumer}
        client.save(files)
        client.run("install --build")
        generated = load(os.path.join(client.current_folder, "customfile.gen"))
        self.assertEqual(generated, "My custom generator content")
    def multifile_test(self):
        """A generator returning a dict writes each entry to its own file
        and warns that the 'filename' property is unused."""
        gen_reference = ConanFileReference.loads("MyCustomGen/0.2@lasote/stable")
        client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        files = {CONANFILE: generator_multi}
        client.save(files)
        client.run("export lasote/stable")
        client.run("upload %s" % str(gen_reference))
        # Test local, no retrieval
        files = {CONANFILE_TXT: consumer_multi}
        client.save(files, clean_first=True)
        client.run("install --build")
        self.assertIn("Generator MyCustomMultiGenerator is multifile. "
                      "Property 'filename' not used",
                      client.user_io.out)
        for i in (1, 2):
            generated = load(os.path.join(client.current_folder, "file%d.gen" % i))
            self.assertEqual(generated, "CustomContent%d" % i)
| {
"content_hash": "6e791220b422b5181349ac30c316c200",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 92,
"avg_line_length": 30.919354838709676,
"alnum_prop": 0.6604068857589984,
"repo_name": "mropert/conan",
"id": "f547a7459a1fb2cee14b25e72fec0009d05b9e96",
"size": "3834",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/integration/custom_generator_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "653"
},
{
"name": "Python",
"bytes": "1898890"
},
{
"name": "Shell",
"bytes": "1342"
}
],
"symlink_target": ""
} |
from swgpy.object import *
# Factory for the shared Gran base-male NPC creature template.
# NOTE(review): this file looks auto-generated; the BEGIN/END MODIFICATIONS
# markers delimit the only region meant for hand edits.  `return result`
# follows on the next line.
def create(kernel):
result = Creature()
result.template = "object/creature/npc/base/shared_gran_base_male.iff"
# Attribute-table id 9 — semantics defined by the swgpy attribute templates; confirm there.
result.attribute_template_id = 9
result.stfName("npc_name","gran_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "893194c7dc2d52280320d39f170e6647",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 23.384615384615383,
"alnum_prop": 0.6907894736842105,
"repo_name": "anhstudios/swganh",
"id": "ac6f59ac075d446f8015bc20baf9a1f8d4f984cb",
"size": "449",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/creature/npc/base/shared_gran_base_male.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import codecs
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates test running to pytest."""

    def __init__(self, *args, **kwargs):
        TestCommand.__init__(self, *args, **kwargs)
        # Tell setuptools a test suite exists so `python setup.py test` runs us.
        self.test_suite = True

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # No extra command-line arguments are forwarded to pytest.
        self.test_args = []

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        raise SystemExit(pytest.main(self.test_args))
# Assemble the long description from the README plus the changelog
# (with the sphinx changelog marker stripped out).
with codecs.open('README.rst', encoding='utf8') as fh:
    README = fh.read()

with codecs.open('HISTORY.rst', encoding='utf8') as fh:
    HISTORY = fh.read().replace('.. :changelog:', '')

LONG_DESCRIPTION = README + '\n\n' + HISTORY

# Map VCS requirement URLs onto their plain PyPI distribution names.
INSTALL_REQUIRES_REPLACEMENTS = {
    'https://github.com/ethereum/ethash/tarball/master#egg=pyethash': 'pyethash',
}

# Read requirements.txt, substituting URL requirements, and de-duplicate.
with open('requirements.txt') as fh:
    INSTALL_REQUIRES = list({
        INSTALL_REQUIRES_REPLACEMENTS.get(line.strip(), line.strip())
        for line in fh
    })

# *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
# see: https://github.com/ethereum/pyethapp/wiki/Development:-Versions-and-Releases
version = '1.2.3'

setup(
    name='pyethapp',
    version=version,
    description='Python Ethereum Client',
    long_description=LONG_DESCRIPTION,
    author='HeikoHeiko',
    author_email='heiko@ethdev.com',
    url='https://github.com/ethereum/pyethapp',
    packages=['pyethapp'],
    package_data={'pyethapp': ['genesisdata/*.json']},
    license='BSD',
    zip_safe=False,
    keywords='pyethapp',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    cmdclass={'test': PyTest},
    install_requires=INSTALL_REQUIRES,
    tests_require=['ethereum-serpent>=1.8.1'],
    entry_points='''
[console_scripts]
pyethapp=pyethapp.app:app
''',
)
| {
"content_hash": "a5cb405bcb04788e61d83fbd45102413",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 85,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.6425,
"repo_name": "RomanZacharia/pyethapp",
"id": "193017054f4981016d719fff58e0486782e6e5a0",
"size": "2446",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "299219"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
import unittest
from conans.errors import ConanException
from conans.paths import CONANFILE, BUILD_INFO
from conans.test.utils.tools import TestClient
from conans.util.files import load, mkdir
import os
class SourceTest(unittest.TestCase):
    """Integration tests for the `conan source` command."""

    def source_reference_test(self):
        # `conan source <reference>` was removed; it must fail with a clear message.
        client = TestClient()
        error = client.run("source lib/1.0@conan/stable", ignore_error=True)
        self.assertTrue(error)
        self.assertIn("'conan source' doesn't accept a reference anymore", client.out)

    def source_local_cwd_test(self):
        # source() must execute with the --source_folder as working directory.
        conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
name = "Hello"
version = "0.1"
def source(self):
self.output.info("Running source!")
self.output.info("cwd=>%s" % os.getcwd())
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        subdir = os.path.join(client.current_folder, "subdir")
        os.mkdir(subdir)
        client.run("install . --install-folder subdir")
        client.run("source . --install-folder subdir --source_folder subdir")
        self.assertIn("PROJECT: Configuring sources", client.user_io.out)
        self.assertIn("PROJECT: cwd=>%s" % subdir, client.user_io.out)

    def local_source_src_not_exist_test(self):
        # A missing --source_folder must be created automatically.
        conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
name = "Hello"
version = "0.1"
def source(self):
pass
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        # Automatically created
        client.run("source . --source_folder=src")
        self.assertTrue(os.path.exists(os.path.join(client.current_folder, "src")))

    def build_folder_no_exists_crash_test(self):
        # Pointing --install-folder at a non-existent directory is an error,
        # not an auto-create.
        conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
name = "Hello"
version = "0.1"
def source(self):
pass
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        # Automatically created
        error = client.run("source . --install-folder=missing_folder", ignore_error=True)
        self.assertTrue(error)
        self.assertIn("Specified info-folder doesn't exist", client.out)

    def build_folder_reading_infos_test(self):
        # source() can read deps_cpp_info / deps_env_info / deps_user_info once
        # `conan install` has generated them in the --install-folder.
        conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
name = "Hello"
version = "0.1"
def package_info(self):
self.cpp_info.cppflags.append("FLAG")
self.env_info.MYVAR = "foo"
self.user_info.OTHERVAR = "bar"
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        client.run("export conan/testing")
        conanfile = '''
import os
from conans import ConanFile
from conans.util.files import save
class ConanLib(ConanFile):
requires="Hello/0.1@conan/testing"
def source(self):
assert(os.getcwd() == self.source_folder)
self.output.info("FLAG=%s" % self.deps_cpp_info["Hello"].cppflags[0])
self.output.info("MYVAR=%s" % self.deps_env_info["Hello"].MYVAR)
self.output.info("OTHERVAR=%s" % self.deps_user_info["Hello"].OTHERVAR)
self.output.info("CURDIR=%s" % os.getcwd())
'''
        # First, failing source()
        client.save({CONANFILE: conanfile}, clean_first=True)
        build_folder = os.path.join(client.current_folder, "build")
        src_folder = os.path.join(client.current_folder, "src")
        mkdir(build_folder)
        mkdir(src_folder)
        # Without a prior install, the deps info files are missing and source() fails.
        client.run("source . --install-folder='%s' --source_folder='%s'" % (build_folder, src_folder),
                   ignore_error=True)
        self.assertIn("self.deps_cpp_info not defined.", client.out)
        client.run("install . --install-folder build --build ")
        client.run("source . --install-folder='%s' --source_folder='%s'" % (build_folder, src_folder),
                   ignore_error=True)
        self.assertIn("FLAG=FLAG", client.out)
        self.assertIn("MYVAR=foo", client.out)
        self.assertIn("OTHERVAR=bar", client.out)
        self.assertIn("CURDIR=%s" % src_folder, client.out)

    def repeat_args_fails_test(self):
        # Repeating a folder option on the command line must make the command fail.
        conanfile = '''
from conans import ConanFile
class ConanLib(ConanFile):
def source(self):
pass
'''
        client = TestClient()
        client.save({CONANFILE: conanfile})
        client.run("source . --source-folder sf")
        with self.assertRaisesRegexp(Exception, "Command failed"):
            client.run("source . --source-folder sf --source-folder sf")
        with self.assertRaisesRegexp(Exception, "Command failed"):
            client.run("source . --source-folder sf --install-folder if --install-folder rr")

    def local_source_test(self):
        # A failing source() must report the failing line; after fixing the
        # recipe, re-running must succeed and produce the file.
        conanfile = '''
from conans import ConanFile
from conans.util.files import save
class ConanLib(ConanFile):
def source(self):
self.output.info("Running source!")
err
save("file1.txt", "Hello World")
'''
        # First, failing source()
        client = TestClient()
        client.save({CONANFILE: conanfile,
                     BUILD_INFO: ""})
        client.run("source .", ignore_error=True)
        self.assertIn("PROJECT: Running source!", client.user_io.out)
        # NOTE(review): the "line 9" below is coupled to the exact layout of the
        # conanfile literal above.
        self.assertIn("ERROR: PROJECT: Error in source() method, line 9", client.user_io.out)
        # Fix the error and repeat
        client.save({CONANFILE: conanfile.replace("err", "")})
        client.run("source .")
        self.assertIn("PROJECT: Configuring sources in", client.user_io.out)
        self.assertIn("PROJECT: Running source!", client.user_io.out)
        self.assertEqual("Hello World", load(os.path.join(client.current_folder, "file1.txt")))
| {
"content_hash": "006e09a87afbeca7313da15a2ca2d461",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 102,
"avg_line_length": 33.292397660818715,
"alnum_prop": 0.6305989812049886,
"repo_name": "lasote/conan",
"id": "56a351fbe961b875a256199aa743f1d8da658d14",
"size": "5693",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/command/source_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1124"
},
{
"name": "Python",
"bytes": "2480006"
},
{
"name": "Shell",
"bytes": "1912"
}
],
"symlink_target": ""
} |
import json
import requests
import jwt
from jwt import DecodeError
from datetime import datetime, timedelta
from app.models import User
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.generic import View
from django.core import serializers
class social(View):
    """Facebook OAuth2 login view: exchanges an auth code for a Graph API
    profile and issues/validates application JWTs.
    """
    # NOTE(review): class name is lower-case; PEP 8 would use `Social`.

    def create_token(self, user):
        """Issue a JWT for *user*: subject = user.id, valid for 14 days."""
        payload = {
            'sub': user.id,
            'iat': datetime.utcnow(),
            'exp': datetime.utcnow() + timedelta(days=14)
        }
        token = jwt.encode(payload, settings.TOKEN_SECRET)
        # Convert the encoded token to a text string for JSON serialization.
        return token.decode('unicode_escape')

    def parse_token(self, req):
        """Decode the JWT from the 'Authorization: Bearer <token>' header."""
        # Assumes the header is present and well-formed; raises otherwise.
        token = req.META.get('HTTP_AUTHORIZATION').split()[1]
        return jwt.decode(token, settings.TOKEN_SECRET)

    def me(self, request, *args, **kwargs):
        """Return the serialized current user, or 401 if the token/user is invalid.

        The `destroy` flag tells the client to discard its stored token.
        """
        try:
            payload = self.parse_token(request)
        except DecodeError:
            return HttpResponse(json.dumps({ 'destroy': True }), status=401)
        except Exception:
            return HttpResponse(None, status=401)
        id = payload['sub']
        user = User.objects.filter(id=id).first()
        if not user:
            return HttpResponse(json.dumps({ 'destroy': True }), status=401)
        # serializers.serialize expects an iterable, hence the one-element list.
        serialized = serializers.serialize('json', [user])
        data = json.loads(serialized)
        return HttpResponse(json.dumps(data[0]))

    def callback(self, request, *args, **kwargs):
        """Facebook OAuth2 callback: swap the code for an access token, fetch
        the profile, and return an application JWT.
        """
        access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token'
        graph_api_url = 'https://graph.facebook.com/v2.3/me?fields=name,email,picture'
        params = json.loads(request.body)
        params = {
            'client_id': params['clientId'],
            'redirect_uri': params['redirectUri'],
            'client_secret': settings.FACEBOOK_SECRET,
            'code': params['code'],
        }
        # Exchange the authorization code for an access token.
        r = requests.post(access_token_url, params)
        access_token = json.loads(r.text)
        # GET => https://graph.facebook.com/v2.3/me
        # to get user profile (email, first_name, last_name, gender, id)
        r = requests.get(graph_api_url, params=access_token)
        profile = json.loads(r.text)
        if request.META.get('HTTP_AUTHORIZATION'):
            user = User.objects.filter(fb_id=profile['id']).first()
            if user:
                token = self.create_token(user)
                return HttpResponse(json.dumps({ 'token': token }))
            else:
                return HttpResponse(json.dumps({ 'destroy': True }), status=401)
            # NOTE(review): everything below in this branch is unreachable —
            # both arms of the if/else above return.  If it were reached,
            # `parse_token` is missing `self.` and would raise NameError.
            payload = parse_token(request)
            user = User.objects.filter(fb_id=payload['sub']).first()
            if not user:
                return HttpResponseBadRequest()
            user = User(fb_id = profile['id'],
                        name = profile['name'],
                        email = profile['email'],
                        picture = profile['picture']['data']['url'])
            user.save()
            token = self.create_token(user)
            return HttpResponse(json.dumps({ 'token': token }))
        # No Authorization header: log in an existing user or create a new one.
        user = User.objects.filter(fb_id=profile['id']).first()
        if user:
            token = self.create_token(user)
            return HttpResponse(json.dumps({ 'token': token }))
        user = User(fb_id = profile['id'],
                    name = profile['name'],
                    email = profile['email'],
                    picture = profile['picture']['data']['url'])
        user.save()
        token = self.create_token(user)
        return HttpResponse(json.dumps({ 'token': token }))
| {
"content_hash": "ddd66529a7fcd8af9f04a6b8c1a8da72",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 86,
"avg_line_length": 33.89622641509434,
"alnum_prop": 0.5713888115780684,
"repo_name": "cesardeazevedo/sniffle",
"id": "54441dd84e52ec319057e0a08154df9959163836",
"size": "3593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/social.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1440"
},
{
"name": "HTML",
"bytes": "1075"
},
{
"name": "JavaScript",
"bytes": "5918"
},
{
"name": "Python",
"bytes": "8732"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
import json
import shutil
import tempfile
import optparse
import zipfile
# Try to detect if we're running from source via the buck repo by
# looking for the .arcconfig file. If found, add the appropriate
# deps to our python path, so we can find the twitter libs and
# setuptools at runtime. Also, locate the `pkg_resources` modules
# via our local setuptools import.
if not zipfile.is_zipfile(sys.argv[0]):
    # Running from source: not packaged inside a zip/PEX.
    # Remove twitter.common.python from the import path - it may be eagerly
    # loaded as part of site-packages.
    sys.modules.pop('twitter', None)
    sys.modules.pop('twitter.common', None)
    sys.modules.pop('twitter.common.python', None)
    # Strip six path components off this file's path to reach the buck repo root.
    buck_root = os.sep.join(__file__.split(os.sep)[:-6])
    sys.path.insert(0, os.path.join(
        buck_root,
        'third-party/py/twitter-commons/src/python'))
    sys.path.insert(0, os.path.join(
        buck_root, 'third-party/py/setuptools'))
    pkg_resources_py = os.path.join(
        buck_root,
        'third-party/py/setuptools/pkg_resources.py')
# Otherwise, we're running from a PEX, so import the `pkg_resources`
# module via a resource.
else:
    import pkg_resources
    # Extract the bundled pkg_resources.py to a temp file so it can be added
    # to the output PEX by path.  The NamedTemporaryFile must stay referenced
    # at module scope so the file is not deleted while still needed.
    pkg_resources_py_tmp = tempfile.NamedTemporaryFile(
        prefix='pkg_resources.py')
    pkg_resources_py_tmp.write(
        pkg_resources.resource_string(__name__, 'pkg_resources.py'))
    pkg_resources_py_tmp.flush()
    pkg_resources_py = pkg_resources_py_tmp.name
from twitter.common.python.pex_builder import PEXBuilder
from twitter.common.python.interpreter import PythonInterpreter
def dereference_symlinks(src):
    """
    Resolve all symbolic references that `src` points to. Note that this
    is different than `os.path.realpath` as path components leading up to
    the final location may still be symbolic links.
    """
    while True:
        if not os.path.islink(src):
            return src
        # A relative link target is interpreted relative to the link's directory.
        target = os.readlink(src)
        src = os.path.join(os.path.dirname(src), target)
def main():
    """Build a PEX file at the `output` path from a JSON manifest read on stdin.

    The manifest maps destination paths to source paths for 'modules',
    'resources', 'nativeLibraries', and lists 'prebuiltLibraries'.
    """
    parser = optparse.OptionParser(usage="usage: %prog [options] output")
    parser.add_option('--entry-point', default='__main__')
    parser.add_option('--no-zip-safe', action='store_false', dest='zip_safe', default=True)
    parser.add_option('--python', default=sys.executable)
    options, args = parser.parse_args()
    if len(args) == 1:
        output = args[0]
    else:
        parser.error("'output' positional argument is required")
        # NOTE(review): parser.error() exits the process, so this return is
        # effectively unreachable; kept as-is.
        return 1
    # The manifest is passed via stdin, as it can sometimes get too large
    # to be passed as a CLA.
    manifest = json.load(sys.stdin)
    # Setup a temp dir that the PEX builder will use as its scratch dir.
    tmp_dir = tempfile.mkdtemp()
    try:
        # The version of pkg_resources.py (from setuptools) on some distros is
        # too old for PEX. So we keep a recent version in the buck repo and
        # force it into the process by constructing a custom PythonInterpreter
        # instance using it.
        interpreter = PythonInterpreter(
            options.python,
            PythonInterpreter.from_binary(options.python).identity,
            extras={})
        pex_builder = PEXBuilder(
            path=tmp_dir,
            interpreter=interpreter,
        )
        # Set whether this PEX as zip-safe, meaning everything will stayed zipped up
        # and we'll rely on python's zip-import mechanism to load modules from
        # the PEX. This may not work in some situations (e.g. native
        # libraries, libraries that want to find resources via the FS).
        pex_builder.info.zip_safe = options.zip_safe
        # Set the starting point for this PEX.
        pex_builder.info.entry_point = options.entry_point
        # Copy in our version of `pkg_resources`.
        pex_builder.add_source(
            dereference_symlinks(pkg_resources_py),
            os.path.join(pex_builder.BOOTSTRAP_DIR, 'pkg_resources.py'))
        # Add the sources listed in the manifest.
        for dst, src in manifest['modules'].iteritems():
            # NOTE(agallagher): calls the `add_source` and `add_resource` below
            # hard-link the given source into the PEX temp dir. Since OS X and
            # Linux behave different when hard-linking a source that is a
            # symbolic link (Linux does *not* follow symlinks), resolve any
            # layers of symlinks here to get consistent behavior.
            try:
                pex_builder.add_source(dereference_symlinks(src), dst)
            except OSError as e:
                raise Exception("Failed to add {}: {}".format(src, e))
        # Add resources listed in the manifest.
        for dst, src in manifest['resources'].iteritems():
            # NOTE(agallagher): see rationale above.
            pex_builder.add_resource(dereference_symlinks(src), dst)
        # Add prebuilt libraries listed in the manifest.
        for req in manifest.get('prebuiltLibraries', []):
            try:
                pex_builder.add_dist_location(req)
            except Exception as e:
                raise Exception("Failed to add {}: {}".format(req, e))
        # Add native libraries listed in the manifest (as resources).
        for dst, src in manifest['nativeLibraries'].iteritems():
            # NOTE(agallagher): see rationale above.
            pex_builder.add_resource(dereference_symlinks(src), dst)
        # Generate the PEX file.
        pex_builder.build(output)
    # Always try cleaning up the scratch dir, ignoring failures.
    finally:
        shutil.rmtree(tmp_dir, True)
# Run only when executed as a script: calling main() unconditionally at module
# level would build a PEX (and read stdin) on a mere import of this module.
if __name__ == '__main__':
    sys.exit(main())
| {
"content_hash": "018e488972cd3f5b57b3aa6289d9fd9d",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 91,
"avg_line_length": 38.227586206896554,
"alnum_prop": 0.6516326898791268,
"repo_name": "rhencke/buck",
"id": "da1532863862c79ab52379c77eb34731380d7ad2",
"size": "5566",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/com/facebook/buck/python/pex.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "87"
},
{
"name": "Batchfile",
"bytes": "683"
},
{
"name": "C",
"bytes": "245946"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "4291"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "899"
},
{
"name": "Go",
"bytes": "13543"
},
{
"name": "Groff",
"bytes": "440"
},
{
"name": "HTML",
"bytes": "4938"
},
{
"name": "IDL",
"bytes": "128"
},
{
"name": "Java",
"bytes": "11369310"
},
{
"name": "JavaScript",
"bytes": "931213"
},
{
"name": "Lex",
"bytes": "2442"
},
{
"name": "Makefile",
"bytes": "1791"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "2956"
},
{
"name": "Objective-C",
"bytes": "98934"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "143"
},
{
"name": "Python",
"bytes": "226093"
},
{
"name": "Rust",
"bytes": "938"
},
{
"name": "Shell",
"bytes": "35199"
},
{
"name": "Smalltalk",
"bytes": "607"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
} |
from lib.logger import get_logger
from collections import OrderedDict
from tasks.hierarchy import DenormalizedHierarchy, GetParentsFunction, \
Hierarchy, HierarchyInfoUnion, HierarchyChildParentsUnion, \
HierarchyChildParent, LevelHierarchy, LevelInfo
from tasks.uk.cdrc import OutputAreas
from tasks.uk.census.ons import NUTS1UK
from tasks.uk.datashare import PostcodeAreas
from tasks.uk.gov import LowerLayerSuperOutputAreas, \
MiddleLayerSuperOutputAreas
from tasks.uk.odl import PostcodeSectors, PostcodeDistricts
LOGGER = get_logger(__name__)

# Country code shared by all UK hierarchy tasks.
COUNTRY = 'uk'

# Maps each level identifier to the task class producing its geographies.
# The mapping is ordered; presumably finest-to-coarsest granularity —
# confirm against the hierarchy tasks before relying on the order.
LEVEL_CLASSES = OrderedDict([
    ('cdrc_the_geom', OutputAreas),
    ('odl_ps_geo', PostcodeSectors),
    ('gov_lsoa_geo', LowerLayerSuperOutputAreas),
    ('gov_msoa_geo', MiddleLayerSuperOutputAreas),
    ('odl_pd_geo', PostcodeDistricts),
    ('datashare_pa_geo', PostcodeAreas),
    ('nuts1', NUTS1UK)
])

# Level identifiers, in the same order as LEVEL_CLASSES.
LEVELS = list(LEVEL_CLASSES.keys())
def geography(level):
    """Return a factory building the geography task registered for *level*.

    The returned callable accepts a ``year`` argument for interface
    compatibility but does not use it (the level classes here take no
    constructor arguments).
    """
    def _factory(year):
        return geography_class(level)()
    return _factory
def geography_class(level):
    # Look up the task class registered for this level identifier;
    # raises KeyError for unknown levels.
    return LEVEL_CLASSES[level]
class UKTask:
    """Mixin providing the country code shared by the UK hierarchy tasks."""

    @property
    def _country(self):
        return COUNTRY
class UKDenormalizedHierarchy(UKTask, DenormalizedHierarchy):
    """Denormalized hierarchy task specialized for the UK levels."""

    def requires(self):
        deps = {}
        deps['data'] = UKHierarchy(year=self.year)
        deps['function'] = UKGetParentsFunction(year=self.year)
        deps['rel'] = UKHierarchyChildParentsUnion(year=self.year, levels=LEVELS)
        return deps
class UKGetParentsFunction(UKTask, GetParentsFunction):
    """Parents-lookup function task specialized for the UK levels."""

    def requires(self):
        deps = {}
        deps['data'] = UKHierarchy(year=self.year)
        deps['rel'] = UKHierarchyChildParentsUnion(year=self.year, levels=LEVELS)
        deps['info'] = UKHierarchyInfoUnion(year=self.year, levels=LEVELS)
        return deps
class UKHierarchy(UKTask, Hierarchy):
    """Hierarchy task specialized for the UK levels."""

    def requires(self):
        rel_task = UKHierarchyChildParentsUnion(year=self.year, levels=LEVELS)
        info_task = UKHierarchyInfoUnion(year=self.year, levels=LEVELS)
        return {'rel': rel_task, 'info': info_task}
class UKHierarchyInfoUnion(UKTask, HierarchyInfoUnion):
    """Union of the per-level info tables for the requested UK levels."""

    def requires(self):
        tasks = []
        for lvl in self.levels:
            tasks.append(UKLevelInfo(year=self.year, geography=lvl))
        return tasks
class UKHierarchyChildParentsUnion(UKTask, HierarchyChildParentsUnion):
    """Union of child->parents relations across the requested UK levels."""

    def requires(self):
        tasks = []
        # _child_parents() yields (child level, parent levels) pairs.
        for cp in self._child_parents():
            tasks.append(UKHierarchyChildParent(year=self.year,
                                                current_geography=cp[0],
                                                parent_geographies=cp[1]))
        return {'hierarchy': tasks}
class UKHierarchyChildParent(HierarchyChildParent):
    """Child/parent relation between one UK level and its parent levels."""

    def requires(self):
        parent_geoms = [geography(level=pg)(year=self.year)
                        for pg in self.parent_geographies]
        level_task = UKLevelHierarchy(year=self.year,
                                      current_geography=self.current_geography,
                                      parent_geographies=self.parent_geographies,
                                      parent_geoid_fields=self._parent_geoid_fields)
        return {
            'level': level_task,
            'current_geom': geography(level=self.current_geography)(year=self.year),
            'parent_geoms': parent_geoms,
        }

    @property
    def _current_geoid_field(self):
        # Geoid column of the child level's geography class.
        return geography_class(self.current_geography).geoid_column()

    @property
    def _parent_geoid_fields(self):
        # Geoid columns of every parent level, in parent_geographies order.
        fields = []
        for pg in self.parent_geographies:
            fields.append(geography_class(pg).geoid_column())
        return fields
class UKLevelHierarchy(LevelHierarchy):
    """Per-level hierarchy task wiring UK info and geometry dependencies."""

    def requires(self):
        year = self.year
        parents = self.parent_geographies
        return {
            'current_info': UKLevelInfo(year=year,
                                        geography=self.current_geography),
            'current_geom': geography(level=self.current_geography)(year=year),
            'parents_infos': [UKLevelInfo(year=year, geography=pg)
                              for pg in parents],
            'parents_geoms': [geography(level=pg)(year=year)
                              for pg in parents],
        }

    @property
    def _geoid_field(self):
        return geography_class(self.current_geography).geoid_column()
class UKLevelInfo(LevelInfo):
    """Info table for a single UK level: geoid and name columns."""

    def requires(self):
        make_geography = geography(level=self.geography)
        return make_geography(year=self.year)

    @property
    def _geoid_field(self):
        level_cls = geography_class(self.geography)
        return level_cls.geoid_column()

    @property
    def _geoname_field(self):
        level_cls = geography_class(self.geography)
        return level_cls.geoname_column()
| {
"content_hash": "c7cad567da0828a44d10b2d6749f89d8",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 85,
"avg_line_length": 33.09027777777778,
"alnum_prop": 0.6316894018887723,
"repo_name": "CartoDB/bigmetadata",
"id": "f7dbe4512604f28aef1a6cfdab2f516beccca55f",
"size": "4765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/uk/hierarchy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "143"
},
{
"name": "Dockerfile",
"bytes": "2305"
},
{
"name": "HTML",
"bytes": "19058"
},
{
"name": "JavaScript",
"bytes": "5864"
},
{
"name": "Makefile",
"bytes": "27552"
},
{
"name": "PLpgSQL",
"bytes": "32699"
},
{
"name": "Python",
"bytes": "2967442"
},
{
"name": "Shell",
"bytes": "11590"
}
],
"symlink_target": ""
} |
import uuid
from msrest.pipeline import ClientRawResponse
from .key_vault_authentication import KeyVaultAuthentication
from ..key_vault_client import KeyVaultClient as KeyVaultClientBase
from ..models import KeyVaultErrorException
from msrest.authentication import BasicTokenAuthentication
class CustomKeyVaultClient(KeyVaultClientBase):
    """Key Vault client that auto-wraps AAD credentials and adds the
    pending-CSR operation missing from the generated base client."""

    def __init__(self, credentials):
        """The key vault client performs cryptographic key operations and vault operations against the Key Vault service.

        :ivar config: Configuration for client.
        :vartype config: KeyVaultClientConfiguration
        :param credentials: Credentials needed for the client to connect to Azure.
        :type credentials: :mod:`A msrestazure Credentials
         object<msrestazure.azure_active_directory>` or :mod:`A KeyVaultAuthentication
         object<key_vault_authentication>`
        """
        # if the supplied credentials instance is not derived from KeyVaultAuthBase but is an AAD credential type
        if not isinstance(credentials, KeyVaultAuthentication) and isinstance(credentials, BasicTokenAuthentication):
            # wrap the supplied credentials with a KeyVaultAuthentication instance. Use that for the credentials supplied to the base client
            credentials = KeyVaultAuthentication(credentials=credentials)
        super(CustomKeyVaultClient, self).__init__(credentials)

    def get_pending_certificate_signing_request(self, vault_base_url, certificate_name, custom_headers=None, raw=False, **operation_config):
        """Gets the Base64 pending certificate signing request (PKCS-10).

        :param vault_base_url: The vault name, e.g.
         https://myvault.vault.azure.net
        :type vault_base_url: str
        :param certificate_name: The name of the certificate
        :type certificate_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Base64 encoded pending certificate signing request (PKCS-10).
        :rtype: str
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/certificates/{certificate-name}/pending'
        path_format_arguments = {
            'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
            'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        # The service returns the raw CSR payload, hence the non-JSON Accept type.
        header_parameters['Accept'] = 'application/pkcs10'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            raise KeyVaultErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            # Body is returned as-is (encoded CSR text), not deserialized from JSON.
            deserialized = response.content
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
| {
"content_hash": "7c5ee00745bdf6381c526359b3c0379b",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 140,
"avg_line_length": 45.853932584269664,
"alnum_prop": 0.6897819161970106,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "a35a091f3ebca389157ef9eefbd3c7539051dc8d",
"size": "4427",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-keyvault/azure/keyvault/custom/key_vault_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from . import Analysis, register_analysis
from .. import SIM_PROCEDURES
import logging
l = logging.getLogger('angr.analyses.callee_cleanup_finder')
class CalleeCleanupFinder(Analysis):
    """Finds, per function, how many bytes the callee cleans off the stack on
    return, and optionally hooks such functions with a callee-cleanup stub.

    NOTE(review): Python 2 code (`itervalues`/`iteritems`, integer `/`).
    """

    def __init__(self, starts=None, hook_all=False):
        # addr -> stack-cleanup size in bytes (excluding the return address).
        self.results = {}
        if starts is None:
            # Default to all resolved import targets of the main binary.
            starts = [imp.resolvedby.rebased_addr for imp in self.project.loader.main_object.imports.itervalues()]
        for addr in starts:
            with self._resilience():
                size = self.analyze(addr)
                if size is None:
                    l.info("Couldn't find return for function at %#x", addr)
                else:
                    self.results[addr] = size
        if hook_all:
            for addr, size in self.results.iteritems():
                if self.project.is_hooked(addr):
                    continue
                if size % self.project.arch.bytes != 0:
                    l.error("Function at %#x has a misaligned return?", addr)
                    continue
                # One argument per stack word cleaned up by the callee.
                args = size / self.project.arch.bytes
                cc = self.project.factory.cc_from_arg_kinds([False]*args)
                cc.CALLEE_CLEANUP = True
                sym = self.project.loader.find_symbol(addr)
                name = sym.name if sym is not None else None
                lib = self.project.loader.find_object_containing(addr)
                libname = lib.provides if lib is not None else None
                self.project.hook(addr, SIM_PROCEDURES['stubs']['ReturnUnconstrained'](cc=cc, display_name=name, library_name=libname, is_stub=True))

    def analyze(self, addr):
        """Walk blocks from *addr* to a return; derive the cleanup size from
        the stack-pointer adjustment preceding the return statement.

        Returns the cleanup size in bytes, or None if no return was found.
        """
        seen = set()
        todo = [addr]
        while todo:
            addr = todo.pop(0)
            seen.add(addr)
            # Lift without optimization so the sp-adjustment isn't folded away.
            irsb = self.project.factory.block(addr, opt_level=0).vex
            if irsb.jumpkind == 'Ijk_Ret':
                # got it!  Scan backwards for the sp add feeding the return.
                for stmt in reversed(irsb.statements):
                    if stmt.tag == 'Ist_IMark':
                        l.error("VERY strange return instruction at %#x...", addr)
                        break
                    if stmt.tag == 'Ist_WrTmp':
                        if stmt.data.tag == 'Iex_Binop':
                            if stmt.data.op.startswith('Iop_Add'):
                                # Subtract one word for the popped return address.
                                return stmt.data.args[1].con.value - self.project.arch.bytes
            elif irsb.jumpkind == 'Ijk_Call':
                # Continue analysis at the call's fall-through address.
                if addr + irsb.size not in seen:
                    todo.append(addr + irsb.size)
            else:
                # Follow all known static jump targets not yet visited.
                todo.extend(irsb.constant_jump_targets - seen)
        return None

register_analysis(CalleeCleanupFinder, 'CalleeCleanupFinder')
| {
"content_hash": "0fd61598e9b5e62567a38434303395b9",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 149,
"avg_line_length": 41.78125,
"alnum_prop": 0.5329094988780853,
"repo_name": "chubbymaggie/angr",
"id": "f74b4a8f31867a40c849036624f250ee474b40ba",
"size": "2674",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "angr/analyses/callee_cleanup_finder.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "38545"
},
{
"name": "Makefile",
"bytes": "617"
},
{
"name": "Python",
"bytes": "2762600"
}
],
"symlink_target": ""
} |
from flask.ext.login import AnonymousUserMixin, UserMixin
from flask_truss.models.base import BaseModel, db
from passlib.hash import bcrypt
class Anonymous(AnonymousUserMixin):
    """Anonymous (not-logged-in) user type handed to Flask-Login."""
    pass
class User(UserMixin, BaseModel):
    """Application user storing a bcrypt-hashed password, never the plaintext."""

    # __tablename__ needs to be specified depending on the database, since some eg postgres reserve the word `user`.
    __tablename__ = 'user_table'

    id = db.Column(db.Integer, primary_key=True)
    user_name = db.Column(db.Text, nullable=False)
    # Sized for a bcrypt digest (60 characters).
    pass_hash = db.Column(db.String(60), nullable=False)

    @property
    def password(self):
        """Return the pass_hash as if it were the password."""
        return self.pass_hash

    @password.setter
    def password(self, new_password):
        """Automatically salt and hash the provided password."""
        self.pass_hash = bcrypt.encrypt(new_password)
| {
"content_hash": "d6e5ea4954d2ec038592e5cca23c5230",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 116,
"avg_line_length": 31.25925925925926,
"alnum_prop": 0.6990521327014217,
"repo_name": "bmoar/flask-truss",
"id": "dcbe6bb2801b70596bbcd27dcd66a13f45fe5cd0",
"size": "844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_truss/models/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "21683"
},
{
"name": "Shell",
"bytes": "2560"
}
],
"symlink_target": ""
} |
from dynmen.dmenu import DMenu
from dynmen import new_dmenu
import pytest
def test_import():
    """The `i` flag and its `case_insensitive` alias stay in sync and
    render the '-i' command-line option when enabled."""
    m = DMenu()
    assert m._trait_transformed['i'] == []
    assert m.i is False
    assert m.case_insensitive is False
    m.i = True
    assert m.i is True
    assert m.case_insensitive is True
    assert m._trait_transformed['i'] == ['-i']
def test_import2():
    # Constructor kwargs should set the flag just like attribute assignment.
    m = new_dmenu(i=True)
    assert m.i is True
    assert m._trait_transformed['i'] == ['-i']
def test_help_msg():
    """Help text exists on the short flag and is shared with its long alias."""
    help_text = DMenu.i.help
    assert len(help_text) > 0
    instance = DMenu()
    alias_descriptor = instance.traits()['case_insensitive']
    assert alias_descriptor.help == help_text
| {
"content_hash": "0f9403a70489e3f1ba34e438da5decc1",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 49,
"avg_line_length": 23.85185185185185,
"alnum_prop": 0.6428571428571429,
"repo_name": "frostidaho/dynmen",
"id": "2fb64a5b221bac58ea990028222059acd094d110",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_dmenu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2017"
},
{
"name": "Python",
"bytes": "72180"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
# Release version string, passed to setup() below.
version = '0.6.0'
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory.

    Used to load README.txt as the long description. The file handle is
    closed deterministically via the context manager (the original left it
    to the garbage collector).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as file_object:
        return file_object.read()
# Distribution metadata for the cmislib package (Apache Chemistry).
setup(
    name = "cmislib",
    description = 'Apache Chemistry CMIS client library for Python',
    version = version,
    # Runtime dependencies: ISO 8601 date parsing and the HTTP client.
    install_requires = [
        'iso8601',
        'httplib2'
    ],
    author = 'Apache Chemistry Project',
    author_email = 'dev@chemistry.apache.org',
    license = 'Apache License (2.0)',
    url = 'http://chemistry.apache.org/',
    # The importable packages live under src/; tests are excluded from the
    # built distribution.
    package_dir = {'':'src'},
    packages = find_packages('src', exclude=['tests']),
    #include_package_data = True,
    exclude_package_data = {'':['tests']},
    long_description = read('README.txt'),
    classifiers = [
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries",
    ],
)
| {
"content_hash": "af2c8deee46d192d1c92704deeafb268",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 70,
"avg_line_length": 31,
"alnum_prop": 0.6043643263757116,
"repo_name": "apache/chemistry-cmislib",
"id": "be22e3a5eb11f653f5707cbc1d88223cd09a2f24",
"size": "1913",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3949"
},
{
"name": "Makefile",
"bytes": "6496"
},
{
"name": "Python",
"bytes": "388296"
}
],
"symlink_target": ""
} |
import numpy
import random
import math
class TerrainGenerator:
    """Generates 2-D integer height maps using several algorithms.

    Each ``create_*`` method returns a numpy array of shape ``size`` with
    values in ``[0, myrange]`` (``create_perlin`` scales by ``roughness``
    instead).  Passing ``symmetric=True`` mirrors the map about the
    north-west / south-east diagonal (square sizes only).

    Fixes over the original: ``numpy.random.random_integers`` (deprecated
    since NumPy 1.11 and since removed) is replaced with ``randint``, and
    Python-2 integer division (``/``) is replaced with ``//`` so lattice
    indexes and heights stay integral on Python 3.
    """

    def create_random(self, size, myrange, symmetric=False):
        """Creates a random terrain map.

        Every cell is drawn independently and uniformly from
        [0, myrange] inclusive.
        """
        # randint's upper bound is exclusive, hence myrange + 1 (the old
        # random_integers call was inclusive on both ends).
        ret = numpy.random.randint(0, myrange + 1, size)
        if symmetric:
            ret = self.make_symmetric(ret)
        return ret

    def _bounded_step(self, value, myrange):
        """Return value plus a random step of -1, 0 or +1, clamped so the
        result never leaves [0, myrange]."""
        if value <= 0:
            return self.add_random_range(value, 0, 1)
        if value >= myrange:
            return self.add_random_range(value, -1, 0)
        return self.add_random_range(value, -1, 1)

    def create_streak(self, size, myrange, symmetric=False):
        """Creates a terrain map containing streaks that run from
        north-west to south-east.

        Starts with a single point [[a]] and converts it into
        [[a, b], [c, d]] where:
            b = a + (random change)
            c = a + (random change)
            d = b + (random change) and c + (random change)
        Repeat until size matches required size.
        """
        # Top row: a random walk eastwards from a random starting height.
        ret = [[self.add_random_range(0, 0, myrange)]]
        for _ in range(size[0] - 1):
            ret[0].append(self._bounded_step(ret[0][-1], myrange))
        # Every following row is derived from the one above it.
        for _ in range(size[1] - 1):
            next_row = [self._bounded_step(ret[-1][0], myrange)]
            for x in range(size[0] - 1):
                pos_north = ret[-1][x + 1]
                pos_west = next_row[-1]
                if pos_west == pos_north:
                    next_row.append(self._bounded_step(pos_west, myrange))
                elif abs(pos_west - pos_north) == 2:
                    # Sum is even here, so the integer midpoint is exact.
                    next_row.append((pos_west + pos_north) // 2)
                else:
                    next_row.append(random.choice((pos_west, pos_north)))
            ret.append(next_row)
        if symmetric:
            ret = self.make_symmetric(ret)
        return numpy.array(ret)

    def create_simple(self, size, myrange, symmetric=False):
        """Creates a procedural terrain map by midpoint subdivision.

        Starts with corner points [[a, b], [c, d]] and repeatedly inserts
        midpoints (each nudged by a random -1/0/+1 step) between rows and
        then between columns until the grid is at least ``size``, then
        truncates.

        NOTE(review): truncation uses size[0] for both dimensions, so
        non-square sizes are not honoured here (as in the original).
        """
        rand = self.add_random_range
        ret = [[rand(0, 0, myrange), rand(0, 0, myrange)],
               [rand(0, 0, myrange), rand(0, 0, myrange)]]
        while len(ret) <= size[0]:
            # Pass 1: insert a new row between every pair of existing rows.
            expanded = []
            for key_x, row in enumerate(ret):
                expanded.append(row)
                if key_x != len(ret) - 1:
                    below = ret[key_x + 1]
                    expanded.append([
                        self._bounded_step((row[key_y] + below[key_y]) // 2,
                                           myrange)
                        for key_y in range(len(row))])
            ret = expanded
            # Pass 2: insert a new column between every pair of columns.
            expanded = []
            for key_x, row in enumerate(ret):
                next_row = [row[0]]
                for key_y, pos_east in enumerate(row[1:]):
                    pos_west = next_row[-1]
                    if key_x % 2 and not key_y % 2:
                        # Cells on newly inserted rows at even column
                        # boundaries also average in vertical neighbours.
                        pos_north = ret[key_x - 1][key_y + 1]
                        pos_south = ret[key_x + 1][key_y + 1]
                        pos_avg = (
                            pos_north + pos_south + pos_east + pos_west) // 4
                    else:
                        pos_avg = (pos_east + pos_west) // 2
                    next_row.append(self._bounded_step(pos_avg, myrange))
                    next_row.append(pos_east)
                expanded.append(next_row)
            ret = expanded
        ret = [row[:size[0]] for row in ret][:size[0]]
        if symmetric:
            ret = self.make_symmetric(ret)
        return numpy.array(ret)

    def create_perlin(self, size, roughness, symmetric=False):
        """Creates a terrain map from three octaves of value noise.

        Each cell sums bilinear interpolations of a random lattice at cell
        sizes 256, 8 and 2, each octave weighted half the previous one,
        then scales by ``roughness`` and truncates to int.
        """
        (width, height) = size
        values = numpy.zeros(size)
        noise = numpy.random.random_sample((width + 1, height + 1))
        octaves = (256, 8, 2)
        for y in range(height):
            for x in range(width):
                if symmetric and x < y:
                    # Below the diagonal: reuse the mirrored value.
                    values[x][y] = values[y][x]
                    continue
                weight_index = 1
                for cell in octaves:
                    # Floor division: `/` would yield float lattice indexes
                    # on Python 3 and break the noise[...] lookups.
                    top = y // cell
                    left = x // cell
                    my = (y % cell) / cell
                    mx = (x % cell) / cell
                    values[x][y] += self.interpolate(
                        noise[top][left], noise[top][left + 1],
                        noise[top + 1][left], noise[top + 1][left + 1],
                        mx, my) * math.pow(0.5, weight_index)
                    weight_index += 1
                values[x][y] = int(values[x][y] * roughness)
        return numpy.array(values, dtype=int)

    # Some helper functions.
    def interpolate(self, p1, p2, p3, p4, x, y):
        """Bilinearly interpolates between four corner values at (x, y)."""
        top = self.interpolate1d(p1, p2, x)
        bottom = self.interpolate1d(p3, p4, x)
        return self.interpolate1d(top, bottom, y)

    def interpolate1d(self, p1, p2, mu):
        """Linearly interpolates between p1 and p2 at position mu in [0, 1]."""
        return p1 * (1 - mu) + p2 * mu

    def add_random_range(self, x, rand_min, rand_max):
        """Returns a number that is between x + rand_min and x + rand_max
        (inclusive)."""
        return x + random.randrange(rand_min, rand_max + 1)

    def make_symmetric(self, ret):
        """Takes a 2-dimensional grid and makes it symmetrical about the
        north-west / south-east axis by copying the upper triangle over
        the lower one."""
        for x in range(len(ret)):
            for y in range(x):
                ret[x][y] = ret[y][x]
        return ret
| {
"content_hash": "92b58d184b938aed2370d121bf5c39b9",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 108,
"avg_line_length": 40.58888888888889,
"alnum_prop": 0.46222283055023267,
"repo_name": "matthewrobertharris/cells",
"id": "5c23fce4f9b56c1eee5f5928d853cd40ad288479",
"size": "7306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terrain/generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131539"
}
],
"symlink_target": ""
} |
"""Tests for the Run Windows Registry plugin."""
import unittest
from plaso.parsers.winreg_plugins import run
from tests.parsers.winreg_plugins import test_lib
class AutoRunsPluginTest(test_lib.RegistryPluginTestCase):
  """Tests for the auto runs Windows Registry plugin."""

  def _ParseAndCheckRunKey(
      self, test_path_segments, key_path, expected_event_values):
    """Parses a Run key with the plugin and checks the single resulting event.

    Args:
      test_path_segments (list[str]): path segments of the test Registry file.
      key_path (str): path of the Windows Registry key to parse.
      expected_event_values (dict[str, object]): expected event values; the
          'parser' value is filled in by this method.
    """
    test_file_entry = self._GetTestFileEntry(test_path_segments)
    win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
    registry_key = win_registry.GetKeyByPath(key_path)

    plugin = run.AutoRunsPlugin()
    storage_writer = self._ParseKeyWithPlugin(
        registry_key, plugin, file_entry=test_file_entry)

    self.assertEqual(storage_writer.number_of_events, 1)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)

    events = list(storage_writer.GetEvents())

    # This should just be the plugin name, as we're invoking it directly,
    # and not through the parser.
    expected_event_values['parser'] = plugin.plugin_name

    self.CheckEventValues(storage_writer, events[0], expected_event_values)

  def testFilters(self):
    """Tests the FILTERS class attribute."""
    plugin = run.AutoRunsPlugin()

    key_paths = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Run',
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'RunOnce',
        'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Run',
        'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'RunOnce',
        'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'RunOnce\\Setup',
        'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'RunServices',
        'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'RunServicesOnce')

    for key_path in key_paths:
      self._AssertFiltersOnKeyPath(plugin, key_path)

    self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')

  def testProcessNtuserRun(self):
    """Tests the Process function on a NTUSER.DAT Run key."""
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Run')

    expected_event_values = {
        'date_time': '2012-04-05 17:03:53.9920616',
        'data_type': 'windows:registry:run',
        'entries': [
            'Sidebar: %ProgramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun']}

    self._ParseAndCheckRunKey(
        ['NTUSER-RunTests.DAT'], key_path, expected_event_values)

  def testProcessNtuserRunOnce(self):
    """Tests the Process function on a NTUSER.DAT RunOnce key."""
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'RunOnce')

    expected_event_values = {
        'date_time': '2012-04-05 17:03:53.9920616',
        'data_type': 'windows:registry:run',
        'entries': [
            'mctadmin: C:\\Windows\\System32\\mctadmin.exe']}

    self._ParseAndCheckRunKey(
        ['NTUSER-RunTests.DAT'], key_path, expected_event_values)

  def testProcessSoftwareRun(self):
    """Tests the Process function on a SOFTWARE Run key."""
    key_path = (
        'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Run')

    expected_event_values = {
        'date_time': '2011-09-16 20:57:09.0675758',
        'data_type': 'windows:registry:run',
        'entries': [
            ('McAfee Host Intrusion Prevention Tray: "C:\\Program Files\\'
             'McAfee\\Host Intrusion Prevention\\FireTray.exe"'),
            ('VMware Tools: "C:\\Program Files\\VMware\\VMware Tools\\'
             'VMwareTray.exe"'),
            ('VMware User Process: "C:\\Program Files\\VMware\\VMware Tools\\'
             'VMwareUser.exe"')]}

    self._ParseAndCheckRunKey(
        ['SOFTWARE-RunTests'], key_path, expected_event_values)

  def testProcessSoftwareRunOnce(self):
    """Tests the Process function on a SOFTWARE RunOnce key."""
    key_path = (
        'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'RunOnce')

    expected_event_values = {
        'date_time': '2012-04-06 14:07:27.7500000',
        'data_type': 'windows:registry:run',
        'entries': [
            '*WerKernelReporting: %SYSTEMROOT%\\SYSTEM32\\WerFault.exe -k -rq']}

    self._ParseAndCheckRunKey(
        ['SOFTWARE-RunTests'], key_path, expected_event_values)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "9064bfdfd305019d9674e3ab95980ad4",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 80,
"avg_line_length": 37.21621621621622,
"alnum_prop": 0.6714596949891067,
"repo_name": "kiddinn/plaso",
"id": "69a0bd51e644e391acbe9dabcfe3e9bbfb54b309",
"size": "6932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parsers/winreg_plugins/run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1047"
},
{
"name": "Makefile",
"bytes": "68"
},
{
"name": "PowerShell",
"bytes": "9560"
},
{
"name": "Python",
"bytes": "4878625"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "Shell",
"bytes": "26453"
}
],
"symlink_target": ""
} |
""" Cisco_IOS_XR_telemetry_model_driven_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR telemetry\-model\-driven package operational data.
This module contains definitions
for the following management objects\:
telemetry\-model\-driven\: Telemetry operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class MdtEncodingEnumEnum(Enum):
    """
    MdtEncodingEnumEnum

    MDT Encoding

    .. data:: not_set = 0

        ENCODING NOT SET

    .. data:: gpb = 2

        GPB

    .. data:: self_describing_gpb = 3

        SELF DESCRIBING GPB

    .. data:: json = 4

        JSON

    """

    # Values mirror the YANG model's encoding identifiers; note the gap at
    # value 1 -- NOTE(review): presumably reserved in the model, confirm.
    not_set = 0

    gpb = 2

    self_describing_gpb = 3

    json = 4

    @staticmethod
    def _meta_info():
        # NOTE(review): imported inside the function, presumably to defer
        # loading the large generated meta module -- confirm.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
        return meta._meta_table['MdtEncodingEnumEnum']
class MdtInternalPathStatusEnum(Enum):
    """
    MdtInternalPathStatusEnum

    Internal Subscription Path Status

    .. data:: active = 0

        Active

    .. data:: internal_err = 1

        Internal Error

    .. data:: plugin_active = 2

        Plugin Active

    .. data:: plugin_not_initialized = 3

        Plugin Not Initialized

    .. data:: plugin_invalid_cadence = 4

        Plugin Unsupported Cadence

    .. data:: plugin_err = 5

        Plugin Subscription Error

    .. data:: filter_err = 6

        Filter Error

    """

    # Status codes for an internal subscription path; see docstring above
    # for each value's meaning.
    active = 0

    internal_err = 1

    plugin_active = 2

    plugin_not_initialized = 3

    plugin_invalid_cadence = 4

    plugin_err = 5

    filter_err = 6

    @staticmethod
    def _meta_info():
        # Lazy import of the generated meta table module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
        return meta._meta_table['MdtInternalPathStatusEnum']
class MdtIpEnum(Enum):
    """
    MdtIpEnum

    IP Type

    .. data:: ipv4 = 1

        IPv4

    .. data:: ipv6 = 2

        IPv6

    """

    # Address family of a telemetry destination.
    ipv4 = 1

    ipv6 = 2

    @staticmethod
    def _meta_info():
        # Lazy import of the generated meta table module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
        return meta._meta_table['MdtIpEnum']
class MdtTransportEnumEnum(Enum):
    """
    MdtTransportEnumEnum

    MDT Transport

    .. data:: not_set = 0

        PROTOCOL NOT SET

    .. data:: grpc = 1

        GRPC

    .. data:: tcp = 2

        TCP

    .. data:: udp = 3

        UDP

    .. data:: dialin = 6

        DIALIN

    """

    # Transport protocol identifiers; note the gap between 3 and 6 --
    # NOTE(review): values 4-5 presumably reserved in the model, confirm.
    not_set = 0

    grpc = 1

    tcp = 2

    udp = 3

    dialin = 6

    @staticmethod
    def _meta_info():
        # Lazy import of the generated meta table module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
        return meta._meta_table['MdtTransportEnumEnum']
class TelemetryModelDriven(object):
"""
Telemetry operational data
.. attribute:: destinations
Telemetry Destinations
**type**\: :py:class:`Destinations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Destinations>`
.. attribute:: sensor_groups
Telemetry Sensor Groups
**type**\: :py:class:`SensorGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.SensorGroups>`
.. attribute:: subscriptions
Telemetry Subscriptions
**type**\: :py:class:`Subscriptions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Subscriptions>`
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.destinations = TelemetryModelDriven.Destinations()
self.destinations.parent = self
self.sensor_groups = TelemetryModelDriven.SensorGroups()
self.sensor_groups.parent = self
self.subscriptions = TelemetryModelDriven.Subscriptions()
self.subscriptions.parent = self
class Destinations(object):
"""
Telemetry Destinations
.. attribute:: destination
Telemetry Destination
**type**\: list of :py:class:`Destination <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Destinations.Destination>`
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.parent = None
self.destination = YList()
self.destination.parent = self
self.destination.name = 'destination'
class Destination(object):
"""
Telemetry Destination
.. attribute:: destination_id <key>
Id of the destination
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: configured
Set if this is configured destination group
**type**\: int
**range:** 0..4294967295
.. attribute:: destination
list of destinations defined in this group
**type**\: list of :py:class:`Destination_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Destinations.Destination.Destination_>`
.. attribute:: id
Destination Group name
**type**\: str
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.parent = None
self.destination_id = None
self.configured = None
self.destination = YList()
self.destination.parent = self
self.destination.name = 'destination'
self.id = None
class Destination_(object):
"""
list of destinations defined in this group
.. attribute:: collection_group
List of collection groups for this destination group
**type**\: list of :py:class:`CollectionGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Destinations.Destination.Destination_.CollectionGroup>`
.. attribute:: destination
Destination
**type**\: :py:class:`Destination__ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Destinations.Destination.Destination_.Destination__>`
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.parent = None
self.collection_group = YList()
self.collection_group.parent = self
self.collection_group.name = 'collection_group'
self.destination = TelemetryModelDriven.Destinations.Destination.Destination_.Destination__()
self.destination.parent = self
class Destination__(object):
"""
Destination
.. attribute:: dest_ip_address
Destination IP Address
**type**\: :py:class:`DestIpAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Destinations.Destination.Destination_.Destination__.DestIpAddress>`
.. attribute:: dest_port
Destination Port number
**type**\: int
**range:** 0..65535
.. attribute:: encoding
Destination group encoding
**type**\: :py:class:`MdtEncodingEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.MdtEncodingEnumEnum>`
.. attribute:: id
Destination Id
**type**\: str
.. attribute:: last_collection_time
Timestamp of the last collection
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: state
State of streaming on this destination
**type**\: int
**range:** 0..4294967295
.. attribute:: sub_id
Sub Id
**type**\: list of int
**range:** 0..18446744073709551615
.. attribute:: sub_id_str
Sub Idstr
**type**\: str
.. attribute:: tls
TLS connection to this destination
**type**\: int
**range:** 0..4294967295
.. attribute:: tls_host
TLS Hostname of this destination
**type**\: str
.. attribute:: total_num_of_bytes_sent
Total number of bytes sent for this destination
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: total_num_of_packets_sent
Total number of packets sent for this destination
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: transport
Destination group transport
**type**\: :py:class:`MdtTransportEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.MdtTransportEnumEnum>`
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.parent = None
self.dest_ip_address = TelemetryModelDriven.Destinations.Destination.Destination_.Destination__.DestIpAddress()
self.dest_ip_address.parent = self
self.dest_port = None
self.encoding = None
self.id = None
self.last_collection_time = None
self.state = None
self.sub_id = YLeafList()
self.sub_id.parent = self
self.sub_id.name = 'sub_id'
self.sub_id_str = None
self.tls = None
self.tls_host = None
self.total_num_of_bytes_sent = None
self.total_num_of_packets_sent = None
self.transport = None
class DestIpAddress(object):
"""
Destination IP Address
.. attribute:: ip_type
IPType
**type**\: :py:class:`MdtIpEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.MdtIpEnum>`
.. attribute:: ipv4_address
IPV4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6_address
IPV6 Address
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.parent = None
self.ip_type = None
self.ipv4_address = None
self.ipv6_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-telemetry-model-driven-oper:dest-ip-address'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.ip_type is not None:
return True
if self.ipv4_address is not None:
return True
if self.ipv6_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
return meta._meta_table['TelemetryModelDriven.Destinations.Destination.Destination_.Destination__.DestIpAddress']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-telemetry-model-driven-oper:destination'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.dest_ip_address is not None and self.dest_ip_address._has_data():
return True
if self.dest_port is not None:
return True
if self.encoding is not None:
return True
if self.id is not None:
return True
if self.last_collection_time is not None:
return True
if self.state is not None:
return True
if self.sub_id is not None:
for child in self.sub_id:
if child is not None:
return True
if self.sub_id_str is not None:
return True
if self.tls is not None:
return True
if self.tls_host is not None:
return True
if self.total_num_of_bytes_sent is not None:
return True
if self.total_num_of_packets_sent is not None:
return True
if self.transport is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
return meta._meta_table['TelemetryModelDriven.Destinations.Destination.Destination_.Destination__']['meta_info']
class CollectionGroup(object):
"""
List of collection groups for this destination
group
.. attribute:: avg_total_time
Average time for all processing (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: cadence
Period of the collections (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: collection_path
Array of information for sensor paths within collection group
**type**\: list of :py:class:`CollectionPath <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Destinations.Destination.Destination_.CollectionGroup.CollectionPath>`
.. attribute:: encoding
Destination group encoding
**type**\: :py:class:`MdtEncodingEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.MdtEncodingEnumEnum>`
.. attribute:: id
Collection Group id
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: internal_collection_group
Array of information for sysdb paths within collection group
**type**\: list of :py:class:`InternalCollectionGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Destinations.Destination.Destination_.CollectionGroup.InternalCollectionGroup>`
.. attribute:: last_collection_end_time
Timestamp of the end of last collection
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: last_collection_start_time
Timestamp of the start of last collection
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: max_collection_time
Maximum time for a collection (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: max_total_time
Maximum time for all processing (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: min_collection_time
Minimum time for a collection (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: min_total_time
Minimum time for all processing (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: total_collections
Completed collections count
**type**\: int
**range:** 0..4294967295
.. attribute:: total_not_ready
Total number skipped (not ready)
**type**\: int
**range:** 0..4294967295
.. attribute:: total_other_errors
Total number of errors
**type**\: int
**range:** 0..4294967295
.. attribute:: total_send_drops
Total number of send drops
**type**\: int
**range:** 0..4294967295
.. attribute:: total_send_errors
Total number of send errors
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.parent = None
self.avg_total_time = None
self.cadence = None
self.collection_path = YList()
self.collection_path.parent = self
self.collection_path.name = 'collection_path'
self.encoding = None
self.id = None
self.internal_collection_group = YList()
self.internal_collection_group.parent = self
self.internal_collection_group.name = 'internal_collection_group'
self.last_collection_end_time = None
self.last_collection_start_time = None
self.max_collection_time = None
self.max_total_time = None
self.min_collection_time = None
self.min_total_time = None
self.total_collections = None
self.total_not_ready = None
self.total_other_errors = None
self.total_send_drops = None
self.total_send_errors = None
class CollectionPath(object):
"""
Array of information for sensor paths within
collection group
.. attribute:: path
Sensor Path
**type**\: str
.. attribute:: state
State, if sensor path is resolved or not
**type**\: int
**range:** 0..4294967295
.. attribute:: status_str
Error str, if there are any errors resolving the sensor path
**type**\: str
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.parent = None
self.path = None
self.state = None
self.status_str = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-telemetry-model-driven-oper:collection-path'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.path is not None:
return True
if self.state is not None:
return True
if self.status_str is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
return meta._meta_table['TelemetryModelDriven.Destinations.Destination.Destination_.CollectionGroup.CollectionPath']['meta_info']
class InternalCollectionGroup(object):
"""
Array of information for sysdb paths within
collection group
.. attribute:: avg_collection_time
Average time for a collection (ms)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: cadence
Period of the collections (ms)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: collection_method
Collection method in use
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: max_collection_time
Maximum time for a collection (ms)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: min_collection_time
Minimum time for a collection (ms)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: path
Sysdb Path
**type**\: str
.. attribute:: status
Status of collection path
**type**\: :py:class:`MdtInternalPathStatusEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.MdtInternalPathStatusEnum>`
.. attribute:: total_collections
Completed collections count
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_collections_missed
Total number of collections missed
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_datalist_count
Total number of datalists
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_datalist_errors
Total number of datalist errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_encode_errors
Total number of encode errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_encode_notready
Total number of encode deferred
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_finddata_count
Total number of finddata
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_finddata_errors
Total number of finddata errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_get_bulk_count
Total number of get bulk
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_get_bulk_errors
Total number of get bulk errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_get_count
Total number of gets
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_get_errors
Total number of get errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_item_count
Total number of items retrived from sysdb
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_list_count
Total number of lists
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_list_errors
Total number of list errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_send_bytes_dropped
Total number of send bytes dropped
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: total_send_drops
Total number of send channel full
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_send_errors
Total number of send errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_send_packets
Total number of packets sent
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_sent_bytes
Total number of bytes sent
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
    """Initialize every statistics leaf of this sysdb collection path to None."""
    self.parent = None
    # All leaves are scalars defaulting to unset (None).
    for leaf in (
            'avg_collection_time', 'cadence', 'collection_method',
            'max_collection_time', 'min_collection_time', 'path', 'status',
            'total_collections', 'total_collections_missed',
            'total_datalist_count', 'total_datalist_errors',
            'total_encode_errors', 'total_encode_notready',
            'total_finddata_count', 'total_finddata_errors',
            'total_get_bulk_count', 'total_get_bulk_errors',
            'total_get_count', 'total_get_errors', 'total_item_count',
            'total_list_count', 'total_list_errors',
            'total_send_bytes_dropped', 'total_send_drops',
            'total_send_errors', 'total_send_packets', 'total_sent_bytes'):
        setattr(self, leaf, None)
@property
def _common_path(self):
    """XPath of this node, derived from the parent container's path."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:internal-collection-group' % self.parent._common_path
def is_config(self):
    """Operational (read-only) data node: never configuration."""
    return False
def _has_data(self):
    """Return True if any leaf of this node has been assigned a value."""
    # Flat node: every field is a scalar leaf, so a single membership scan
    # reproduces the original chain of `is not None` checks.
    return any(getattr(self, leaf) is not None for leaf in (
        'avg_collection_time', 'cadence', 'collection_method',
        'max_collection_time', 'min_collection_time', 'path', 'status',
        'total_collections', 'total_collections_missed',
        'total_datalist_count', 'total_datalist_errors',
        'total_encode_errors', 'total_encode_notready',
        'total_finddata_count', 'total_finddata_errors',
        'total_get_bulk_count', 'total_get_bulk_errors',
        'total_get_count', 'total_get_errors', 'total_item_count',
        'total_list_count', 'total_list_errors',
        'total_send_bytes_dropped', 'total_send_drops',
        'total_send_errors', 'total_send_packets', 'total_sent_bytes'))
@staticmethod
def _meta_info():
    """Return the YDK meta-model entry describing this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
    return meta._meta_table['TelemetryModelDriven.Destinations.Destination.Destination_.CollectionGroup.InternalCollectionGroup']['meta_info']
@property
def _common_path(self):
    """XPath of this collection group, derived from the parent's path."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:collection-group' % self.parent._common_path
def is_config(self):
    """Operational (read-only) data node: never configuration."""
    return False
def _has_data(self):
    """True if any leaf is set or any child list entry carries data."""
    leaves = ('avg_total_time', 'cadence', 'encoding', 'id',
              'last_collection_end_time', 'last_collection_start_time',
              'max_collection_time', 'max_total_time',
              'min_collection_time', 'min_total_time',
              'total_collections', 'total_not_ready', 'total_other_errors',
              'total_send_drops', 'total_send_errors')
    if any(getattr(self, leaf) is not None for leaf in leaves):
        return True
    # Child lists: data exists as soon as one entry reports data.
    for list_attr in ('collection_path', 'internal_collection_group'):
        children = getattr(self, list_attr)
        if children is not None and any(c._has_data() for c in children):
            return True
    return False
@staticmethod
def _meta_info():
    """Return the YDK meta-model entry describing this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
    return meta._meta_table['TelemetryModelDriven.Destinations.Destination.Destination_.CollectionGroup']['meta_info']
@property
def _common_path(self):
    """XPath of this destination detail node, derived from the parent."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:destination' % self.parent._common_path
def is_config(self):
    """Operational (read-only) data node: never configuration."""
    return False
def _has_data(self):
    """True if any collection group or the destination container has data."""
    groups = self.collection_group
    if groups is not None and any(g._has_data() for g in groups):
        return True
    dest = self.destination
    return dest is not None and dest._has_data()
@staticmethod
def _meta_info():
    """Return the YDK meta-model entry describing this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
    return meta._meta_table['TelemetryModelDriven.Destinations.Destination.Destination_']['meta_info']
@property
def _common_path(self):
    """Absolute XPath of this destination, keyed by destination_id."""
    if self.destination_id is None:
        raise YPYModelError('Key property destination_id is None')
    return ('/Cisco-IOS-XR-telemetry-model-driven-oper:telemetry-model-driven'
            '/Cisco-IOS-XR-telemetry-model-driven-oper:destinations'
            '/Cisco-IOS-XR-telemetry-model-driven-oper:destination'
            '[Cisco-IOS-XR-telemetry-model-driven-oper:destination-id = '
            + str(self.destination_id) + ']')
def is_config(self):
    """Operational (read-only) data node: never configuration."""
    return False
def _has_data(self):
    """True if the list key, any leaf, or any child destination has data."""
    if any(getattr(self, leaf) is not None
           for leaf in ('destination_id', 'configured', 'id')):
        return True
    children = self.destination
    return children is not None and any(c._has_data() for c in children)
@staticmethod
def _meta_info():
    """Return the YDK meta-model entry describing this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
    return meta._meta_table['TelemetryModelDriven.Destinations.Destination']['meta_info']
@property
def _common_path(self):
    """Absolute XPath of the destinations container (fixed, no keys)."""
    return ('/Cisco-IOS-XR-telemetry-model-driven-oper:telemetry-model-driven'
            '/Cisco-IOS-XR-telemetry-model-driven-oper:destinations')
def is_config(self):
    """Operational (read-only) data node: never configuration."""
    return False
def _has_data(self):
    """True when at least one child destination carries data."""
    children = self.destination
    return children is not None and any(c._has_data() for c in children)
@staticmethod
def _meta_info():
    """Return the YDK meta-model entry describing this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
    return meta._meta_table['TelemetryModelDriven.Destinations']['meta_info']
class Subscriptions(object):
"""
Telemetry Subscriptions
.. attribute:: subscription
Telemetry Subscription
**type**\: list of :py:class:`Subscription <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Subscriptions.Subscription>`
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
    """Create an empty, parent-aware list of telemetry subscriptions."""
    self.parent = None
    children = YList()
    children.parent = self
    children.name = 'subscription'
    self.subscription = children
class Subscription(object):
"""
Telemetry Subscription
.. attribute:: subscription_id <key>
Id of the subscription
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: collection_group
List of collection groups active for this subscription
**type**\: list of :py:class:`CollectionGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Subscriptions.Subscription.CollectionGroup>`
.. attribute:: subscription
Subscription
**type**\: :py:class:`Subscription_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Subscriptions.Subscription.Subscription_>`
.. attribute:: total_num_of_bytes_sent
Total number of bytes sent for this subscription
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: total_num_of_packets_sent
Total number of packets sent for this subscription
**type**\: int
**range:** 0..18446744073709551615
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
    """Set the list key, child containers/lists and counters of this subscription."""
    self.parent = None
    self.subscription_id = None
    groups = YList()
    groups.parent = self
    groups.name = 'collection_group'
    self.collection_group = groups
    detail = TelemetryModelDriven.Subscriptions.Subscription.Subscription_()
    detail.parent = self
    self.subscription = detail
    self.total_num_of_bytes_sent = None
    self.total_num_of_packets_sent = None
class Subscription_(object):
    """
    Subscription detail: its destination groups, sensor profiles and state.

    Attributes:
        destination_grp -- YList of DestinationGrp in this subscription
        id              -- collection subscription name (str leaf)
        sensor_profile  -- YList of SensorProfile in this subscription
        state           -- subscription state (uint32 leaf)
    """
    _prefix = 'telemetry-model-driven-oper'
    _revision = '2016-07-14'

    def __init__(self):
        self.parent = None
        grp_list = YList()
        grp_list.parent = self
        grp_list.name = 'destination_grp'
        self.destination_grp = grp_list
        self.id = None
        profile_list = YList()
        profile_list.parent = self
        profile_list.name = 'sensor_profile'
        self.sensor_profile = profile_list
        self.state = None

    class SensorProfile(object):
        """One sensor group attached to this subscription, with its
        sample/heartbeat intervals and suppress-redundant flag."""
        _prefix = 'telemetry-model-driven-oper'
        _revision = '2016-07-14'

        def __init__(self):
            self.parent = None
            self.heartbeat_interval = None
            self.sample_interval = None
            self.sensor_group = TelemetryModelDriven.Subscriptions.Subscription.Subscription_.SensorProfile.SensorGroup()
            self.sensor_group.parent = self
            self.suppress_redundant = None

        class SensorGroup(object):
            """Named sensor group: configured flag, name and sensor paths."""
            _prefix = 'telemetry-model-driven-oper'
            _revision = '2016-07-14'

            def __init__(self):
                self.parent = None
                self.configured = None
                self.id = None
                paths = YList()
                paths.parent = self
                paths.name = 'sensor_path'
                self.sensor_path = paths

            class SensorPath(object):
                """A sensor path inside the group: path string, resolution
                state and resolution error text."""
                _prefix = 'telemetry-model-driven-oper'
                _revision = '2016-07-14'

                def __init__(self):
                    self.parent = None
                    self.path = None
                    self.state = None
                    self.status_str = None

                @property
                def _common_path(self):
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:sensor-path' % self.parent._common_path

                def is_config(self):
                    """Operational data node: never configuration."""
                    return False

                def _has_data(self):
                    return any(getattr(self, leaf) is not None
                               for leaf in ('path', 'state', 'status_str'))

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
                    return meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription.Subscription_.SensorProfile.SensorGroup.SensorPath']['meta_info']

            @property
            def _common_path(self):
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:sensor-group' % self.parent._common_path

            def is_config(self):
                """Operational data node: never configuration."""
                return False

            def _has_data(self):
                if self.configured is not None or self.id is not None:
                    return True
                if self.sensor_path is not None:
                    return any(child._has_data() for child in self.sensor_path)
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
                return meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription.Subscription_.SensorProfile.SensorGroup']['meta_info']

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:sensor-profile' % self.parent._common_path

        def is_config(self):
            """Operational data node: never configuration."""
            return False

        def _has_data(self):
            if any(getattr(self, leaf) is not None
                   for leaf in ('heartbeat_interval', 'sample_interval',
                                'suppress_redundant')):
                return True
            return self.sensor_group is not None and self.sensor_group._has_data()

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
            return meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription.Subscription_.SensorProfile']['meta_info']

    class DestinationGrp(object):
        """One destination group of this subscription: configured flag,
        group name and the destinations defined in it."""
        _prefix = 'telemetry-model-driven-oper'
        _revision = '2016-07-14'

        def __init__(self):
            self.parent = None
            self.configured = None
            dests = YList()
            dests.parent = self
            dests.name = 'destination'
            self.destination = dests
            self.id = None

        class Destination(object):
            """A single streaming destination: address, port, transport,
            encoding, TLS settings and per-destination counters."""
            _prefix = 'telemetry-model-driven-oper'
            _revision = '2016-07-14'

            def __init__(self):
                self.parent = None
                self.dest_ip_address = TelemetryModelDriven.Subscriptions.Subscription.Subscription_.DestinationGrp.Destination.DestIpAddress()
                self.dest_ip_address.parent = self
                self.dest_port = None
                self.encoding = None
                self.id = None
                self.last_collection_time = None
                self.state = None
                sub_ids = YLeafList()
                sub_ids.parent = self
                sub_ids.name = 'sub_id'
                self.sub_id = sub_ids
                self.sub_id_str = None
                self.tls = None
                self.tls_host = None
                self.total_num_of_bytes_sent = None
                self.total_num_of_packets_sent = None
                self.transport = None

            class DestIpAddress(object):
                """Destination IP address: type discriminator plus the IPv4
                or IPv6 textual address."""
                _prefix = 'telemetry-model-driven-oper'
                _revision = '2016-07-14'

                def __init__(self):
                    self.parent = None
                    self.ip_type = None
                    self.ipv4_address = None
                    self.ipv6_address = None

                @property
                def _common_path(self):
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:dest-ip-address' % self.parent._common_path

                def is_config(self):
                    """Operational data node: never configuration."""
                    return False

                def _has_data(self):
                    return any(getattr(self, leaf) is not None
                               for leaf in ('ip_type', 'ipv4_address',
                                            'ipv6_address'))

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
                    return meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription.Subscription_.DestinationGrp.Destination.DestIpAddress']['meta_info']

            @property
            def _common_path(self):
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:destination' % self.parent._common_path

            def is_config(self):
                """Operational data node: never configuration."""
                return False

            def _has_data(self):
                if self.dest_ip_address is not None and self.dest_ip_address._has_data():
                    return True
                leaves = ('dest_port', 'encoding', 'id', 'last_collection_time',
                          'state', 'sub_id_str', 'tls', 'tls_host',
                          'total_num_of_bytes_sent',
                          'total_num_of_packets_sent', 'transport')
                if any(getattr(self, leaf) is not None for leaf in leaves):
                    return True
                if self.sub_id is not None:
                    # sub_id is a leaf-list: a concrete entry counts as data.
                    return any(entry is not None for entry in self.sub_id)
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
                return meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription.Subscription_.DestinationGrp.Destination']['meta_info']

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:destination-grp' % self.parent._common_path

        def is_config(self):
            """Operational data node: never configuration."""
            return False

        def _has_data(self):
            if self.configured is not None or self.id is not None:
                return True
            if self.destination is not None:
                return any(child._has_data() for child in self.destination)
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
            return meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription.Subscription_.DestinationGrp']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:subscription' % self.parent._common_path

    def is_config(self):
        """Operational data node: never configuration."""
        return False

    def _has_data(self):
        if self.id is not None or self.state is not None:
            return True
        for list_attr in ('destination_grp', 'sensor_profile'):
            children = getattr(self, list_attr)
            if children is not None and any(c._has_data() for c in children):
                return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
        return meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription.Subscription_']['meta_info']
class CollectionGroup(object):
"""
List of collection groups active for this
subscription
.. attribute:: avg_total_time
Average time for all processing (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: cadence
Period of the collections (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: collection_path
Array of information for sensor paths within collection group
**type**\: list of :py:class:`CollectionPath <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Subscriptions.Subscription.CollectionGroup.CollectionPath>`
.. attribute:: encoding
Destination group encoding
**type**\: :py:class:`MdtEncodingEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.MdtEncodingEnumEnum>`
.. attribute:: id
Collection Group id
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: internal_collection_group
Array of information for sysdb paths within collection group
**type**\: list of :py:class:`InternalCollectionGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.Subscriptions.Subscription.CollectionGroup.InternalCollectionGroup>`
.. attribute:: last_collection_end_time
Timestamp of the end of last collection
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: last_collection_start_time
Timestamp of the start of last collection
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: max_collection_time
Maximum time for a collection (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: max_total_time
Maximum time for all processing (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: min_collection_time
Minimum time for a collection (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: min_total_time
Minimum time for all processing (ms)
**type**\: int
**range:** 0..4294967295
.. attribute:: total_collections
Completed collections count
**type**\: int
**range:** 0..4294967295
.. attribute:: total_not_ready
Total number skipped (not ready)
**type**\: int
**range:** 0..4294967295
.. attribute:: total_other_errors
Total number of errors
**type**\: int
**range:** 0..4294967295
.. attribute:: total_send_drops
Total number of send drops
**type**\: int
**range:** 0..4294967295
.. attribute:: total_send_errors
Total number of send errors
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.parent = None
self.avg_total_time = None
self.cadence = None
self.collection_path = YList()
self.collection_path.parent = self
self.collection_path.name = 'collection_path'
self.encoding = None
self.id = None
self.internal_collection_group = YList()
self.internal_collection_group.parent = self
self.internal_collection_group.name = 'internal_collection_group'
self.last_collection_end_time = None
self.last_collection_start_time = None
self.max_collection_time = None
self.max_total_time = None
self.min_collection_time = None
self.min_total_time = None
self.total_collections = None
self.total_not_ready = None
self.total_other_errors = None
self.total_send_drops = None
self.total_send_errors = None
class CollectionPath(object):
"""
Array of information for sensor paths within
collection group
.. attribute:: path
Sensor Path
**type**\: str
.. attribute:: state
State, if sensor path is resolved or not
**type**\: int
**range:** 0..4294967295
.. attribute:: status_str
Error str, if there are any errors resolving the sensor path
**type**\: str
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.parent = None
self.path = None
self.state = None
self.status_str = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-telemetry-model-driven-oper:collection-path'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.path is not None:
return True
if self.state is not None:
return True
if self.status_str is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
return meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription.CollectionGroup.CollectionPath']['meta_info']
class InternalCollectionGroup(object):
"""
Array of information for sysdb paths within
collection group
.. attribute:: avg_collection_time
Average time for a collection (ms)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: cadence
Period of the collections (ms)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: collection_method
Collection method in use
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: max_collection_time
Maximum time for a collection (ms)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: min_collection_time
Minimum time for a collection (ms)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: path
Sysdb Path
**type**\: str
.. attribute:: status
Status of collection path
**type**\: :py:class:`MdtInternalPathStatusEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.MdtInternalPathStatusEnum>`
.. attribute:: total_collections
Completed collections count
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_collections_missed
Total number of collections missed
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_datalist_count
Total number of datalists
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_datalist_errors
Total number of datalist errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_encode_errors
Total number of encode errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_encode_notready
Total number of encode deferred
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_finddata_count
Total number of finddata
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_finddata_errors
Total number of finddata errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_get_bulk_count
Total number of get bulk
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_get_bulk_errors
Total number of get bulk errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_get_count
Total number of gets
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_get_errors
Total number of get errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_item_count
Total number of items retrived from sysdb
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_list_count
Total number of lists
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_list_errors
Total number of list errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_send_bytes_dropped
Total number of send bytes dropped
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: total_send_drops
Total number of send channel full
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_send_errors
Total number of send errors
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_send_packets
Total number of packets sent
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: total_sent_bytes
Total number of bytes sent
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
"""
_prefix = 'telemetry-model-driven-oper'
_revision = '2016-07-14'
def __init__(self):
self.parent = None
self.avg_collection_time = None
self.cadence = None
self.collection_method = None
self.max_collection_time = None
self.min_collection_time = None
self.path = None
self.status = None
self.total_collections = None
self.total_collections_missed = None
self.total_datalist_count = None
self.total_datalist_errors = None
self.total_encode_errors = None
self.total_encode_notready = None
self.total_finddata_count = None
self.total_finddata_errors = None
self.total_get_bulk_count = None
self.total_get_bulk_errors = None
self.total_get_count = None
self.total_get_errors = None
self.total_item_count = None
self.total_list_count = None
self.total_list_errors = None
self.total_send_bytes_dropped = None
self.total_send_drops = None
self.total_send_errors = None
self.total_send_packets = None
self.total_sent_bytes = None
@property
def _common_path(self):
    """Absolute YANG XPath of this node, derived from the parent's path.

    Raises YPYModelError when the node has not been attached to a parent.
    """
    parent = self.parent
    if parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:internal-collection-group' % parent._common_path

def is_config(self):
    """Return False: this class models operational (read-only) data."""
    return False

def _has_data(self):
    """Return True when any leaf of this node carries a value."""
    # Every leaf of this generated class; a node is "present" as soon as
    # one of them is non-None.
    leaves = (
        'avg_collection_time', 'cadence', 'collection_method',
        'max_collection_time', 'min_collection_time', 'path', 'status',
        'total_collections', 'total_collections_missed',
        'total_datalist_count', 'total_datalist_errors',
        'total_encode_errors', 'total_encode_notready',
        'total_finddata_count', 'total_finddata_errors',
        'total_get_bulk_count', 'total_get_bulk_errors',
        'total_get_count', 'total_get_errors', 'total_item_count',
        'total_list_count', 'total_list_errors',
        'total_send_bytes_dropped', 'total_send_drops',
        'total_send_errors', 'total_send_packets', 'total_sent_bytes',
    )
    return any(getattr(self, name) is not None for name in leaves)

@staticmethod
def _meta_info():
    """Return the generated meta-table entry describing this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
    entry = meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription.CollectionGroup.InternalCollectionGroup']
    return entry['meta_info']
@property
def _common_path(self):
    """Absolute YANG XPath of this node, derived from the parent's path.

    Raises YPYModelError when the node has not been attached to a parent.
    """
    parent = self.parent
    if parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '%s/Cisco-IOS-XR-telemetry-model-driven-oper:collection-group' % parent._common_path

def is_config(self):
    """Return False: this class models operational (read-only) data."""
    return False

def _has_data(self):
    """Return True when any leaf or child list entry carries a value."""
    leaves = (
        'avg_total_time', 'cadence', 'encoding', 'id',
        'last_collection_end_time', 'last_collection_start_time',
        'max_collection_time', 'max_total_time',
        'min_collection_time', 'min_total_time',
        'total_collections', 'total_not_ready', 'total_other_errors',
        'total_send_drops', 'total_send_errors',
    )
    if any(getattr(self, name) is not None for name in leaves):
        return True
    # Child YLists: present if any entry is itself populated.
    for child_list in (self.collection_path, self.internal_collection_group):
        if child_list is not None and any(child._has_data() for child in child_list):
            return True
    return False

@staticmethod
def _meta_info():
    """Return the generated meta-table entry describing this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
    entry = meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription.CollectionGroup']
    return entry['meta_info']
@property
def _common_path(self):
    """Absolute YANG XPath of this list entry, keyed by subscription-id.

    Raises YPYModelError when the key leaf is unset.
    """
    if self.subscription_id is None:
        raise YPYModelError('Key property subscription_id is None')
    return ('/Cisco-IOS-XR-telemetry-model-driven-oper:telemetry-model-driven'
            '/Cisco-IOS-XR-telemetry-model-driven-oper:subscriptions'
            '/Cisco-IOS-XR-telemetry-model-driven-oper:subscription'
            '[Cisco-IOS-XR-telemetry-model-driven-oper:subscription-id = %s]'
            % self.subscription_id)

def is_config(self):
    """Return False: this class models operational (read-only) data."""
    return False

def _has_data(self):
    """Return True when any leaf, child container or list entry is populated."""
    leaves = ('subscription_id', 'total_num_of_bytes_sent',
              'total_num_of_packets_sent')
    if any(getattr(self, name) is not None for name in leaves):
        return True
    if self.collection_group is not None and any(
            child._has_data() for child in self.collection_group):
        return True
    child_container = self.subscription
    return child_container is not None and child_container._has_data()

@staticmethod
def _meta_info():
    """Return the generated meta-table entry describing this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
    entry = meta._meta_table['TelemetryModelDriven.Subscriptions.Subscription']
    return entry['meta_info']
@property
def _common_path(self):
    """Fixed absolute XPath of the subscriptions container (no parent key)."""
    return ('/Cisco-IOS-XR-telemetry-model-driven-oper:telemetry-model-driven'
            '/Cisco-IOS-XR-telemetry-model-driven-oper:subscriptions')

def is_config(self):
    """Return False: this class models operational (read-only) data."""
    return False

def _has_data(self):
    """Return True when any subscription list entry is populated."""
    entries = self.subscription
    if entries is None:
        return False
    return any(entry._has_data() for entry in entries)

@staticmethod
def _meta_info():
    """Return the generated meta-table entry describing this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
    entry = meta._meta_table['TelemetryModelDriven.Subscriptions']
    return entry['meta_info']
# Generated YDK model class: read-only operational data for telemetry
# sensor groups. Code is generator-produced; do not hand-tune the wiring.
class SensorGroups(object):
    """
    Telemetry Sensor Groups
    .. attribute:: sensor_group
    Telemetry Sensor Groups
    **type**\: list of :py:class:`SensorGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.SensorGroups.SensorGroup>`
    """

    _prefix = 'telemetry-model-driven-oper'
    _revision = '2016-07-14'

    def __init__(self):
        self.parent = None
        # YDK YList requires parent/name wiring for path resolution.
        self.sensor_group = YList()
        self.sensor_group.parent = self
        self.sensor_group.name = 'sensor_group'

    class SensorGroup(object):
        """
        Telemetry Sensor Groups
        .. attribute:: sensor_group_id <key>
        Id of the sensor group
        **type**\: str
        **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
        .. attribute:: configured
        Set if this is configured sensor group
        **type**\: int
        **range:** 0..4294967295
        .. attribute:: id
        Sensor Group name
        **type**\: str
        .. attribute:: sensor_path
        Array of information for sensor paths within sensor group
        **type**\: list of :py:class:`SensorPath <ydk.models.cisco_ios_xr.Cisco_IOS_XR_telemetry_model_driven_oper.TelemetryModelDriven.SensorGroups.SensorGroup.SensorPath>`
        """

        _prefix = 'telemetry-model-driven-oper'
        _revision = '2016-07-14'

        def __init__(self):
            self.parent = None
            # sensor_group_id is the YANG list key.
            self.sensor_group_id = None
            self.configured = None
            self.id = None
            self.sensor_path = YList()
            self.sensor_path.parent = self
            self.sensor_path.name = 'sensor_path'

        class SensorPath(object):
            """
            Array of information for sensor paths within
            sensor group
            .. attribute:: path
            Sensor Path
            **type**\: str
            .. attribute:: state
            State, if sensor path is resolved or not
            **type**\: int
            **range:** 0..4294967295
            .. attribute:: status_str
            Error str, if there are any errors resolving the sensor path
            **type**\: str
            """

            _prefix = 'telemetry-model-driven-oper'
            _revision = '2016-07-14'

            def __init__(self):
                self.parent = None
                self.path = None
                self.state = None
                self.status_str = None

            @property
            def _common_path(self):
                # Path is derived from the parent; the node must be attached first.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path +'/Cisco-IOS-XR-telemetry-model-driven-oper:sensor-path'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                # Present when any leaf is set.
                if self.path is not None:
                    return True
                if self.state is not None:
                    return True
                if self.status_str is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
                return meta._meta_table['TelemetryModelDriven.SensorGroups.SensorGroup.SensorPath']['meta_info']

        @property
        def _common_path(self):
            # List entries embed their key leaf in the XPath predicate.
            if self.sensor_group_id is None:
                raise YPYModelError('Key property sensor_group_id is None')
            return '/Cisco-IOS-XR-telemetry-model-driven-oper:telemetry-model-driven/Cisco-IOS-XR-telemetry-model-driven-oper:sensor-groups/Cisco-IOS-XR-telemetry-model-driven-oper:sensor-group[Cisco-IOS-XR-telemetry-model-driven-oper:sensor-group-id = ' + str(self.sensor_group_id) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # Present when any leaf is set or any child sensor path is populated.
            if self.sensor_group_id is not None:
                return True
            if self.configured is not None:
                return True
            if self.id is not None:
                return True
            if self.sensor_path is not None:
                for child_ref in self.sensor_path:
                    if child_ref._has_data():
                        return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
            return meta._meta_table['TelemetryModelDriven.SensorGroups.SensorGroup']['meta_info']

    @property
    def _common_path(self):
        # Top-level container: the path is constant.
        return '/Cisco-IOS-XR-telemetry-model-driven-oper:telemetry-model-driven/Cisco-IOS-XR-telemetry-model-driven-oper:sensor-groups'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Present when any sensor group entry is populated.
        if self.sensor_group is not None:
            for child_ref in self.sensor_group:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
        return meta._meta_table['TelemetryModelDriven.SensorGroups']['meta_info']
@property
def _common_path(self):
    """Fixed absolute XPath of the telemetry-model-driven root container."""
    return '/Cisco-IOS-XR-telemetry-model-driven-oper:telemetry-model-driven'

def is_config(self):
    """Return False: this class models operational (read-only) data."""
    return False

def _has_data(self):
    """Return True when any child container is populated."""
    for container in (self.destinations, self.sensor_groups, self.subscriptions):
        if container is not None and container._has_data():
            return True
    return False

@staticmethod
def _meta_info():
    """Return the generated meta-table entry describing this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_telemetry_model_driven_oper as meta
    entry = meta._meta_table['TelemetryModelDriven']
    return entry['meta_info']
| {
"content_hash": "1f8463d1a09b48f514c828aab489d5b9",
"timestamp": "",
"source": "github",
"line_count": 2827,
"max_line_length": 292,
"avg_line_length": 38.21188539087372,
"alnum_prop": 0.41191390881740336,
"repo_name": "111pontes/ydk-py",
"id": "d03fdeac11e1ccb9a4cefc4c74d6601bae1039b2",
"size": "108025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_telemetry_model_driven_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
} |
import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.test import TestCase
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import (
Article, Category, Child, First, Parent, Record, Relation, Reporter,
School, Student, Third, ToFieldChild,
)
class ManyToOneTests(TestCase):
    """Tests of the forward and reverse sides of a many-to-one
    (ForeignKey) relation, using the Reporter/Article test models."""

    def setUp(self):
        """Create two Reporters and one Article written by the first."""
        # Create a few Reporters.
        self.r = Reporter(first_name='John', last_name='Smith', email='john@example.com')
        self.r.save()
        self.r2 = Reporter(first_name='Paul', last_name='Jones', email='paul@example.com')
        self.r2.save()
        # Create an Article.
        self.a = Article(id=None, headline="This is a test",
                         pub_date=datetime.date(2005, 7, 27), reporter=self.r)
        self.a.save()
def test_get(self):
    """Forward access: an Article exposes its related Reporter."""
    # Article objects have access to their related Reporter objects.
    r = self.a.reporter
    self.assertEqual(r.id, self.r.id)
    # These are strings instead of unicode strings because that's what was used in
    # the creation of this reporter (and we haven't refreshed the data from the
    # database, which always returns unicode strings).
    self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
def test_create(self):
    """An Article can be created from a reporter id, as an int or a str."""
    # You can also instantiate an Article by passing the Reporter's ID
    # instead of a Reporter object.
    a3 = Article(id=None, headline="Third article",
                 pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
    a3.save()
    self.assertEqual(a3.reporter.id, self.r.id)
    # Similarly, the reporter ID can be a string.
    a4 = Article(id=None, headline="Fourth article",
                 pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
    a4.save()
    self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
    """Reverse manager: create()/add() attach articles, and add() moves an
    article between reporters; wrong types raise TypeError."""
    # Create an Article via the Reporter object.
    new_article = self.r.article_set.create(headline="John's second story",
                                            pub_date=datetime.date(2005, 7, 29))
    self.assertEqual(repr(new_article), "<Article: John's second story>")
    self.assertEqual(new_article.reporter.id, self.r.id)
    # Create a new article, and add it to the article set.
    new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
    self.r.article_set.add(new_article2)
    self.assertEqual(new_article2.reporter.id, self.r.id)
    self.assertQuerysetEqual(self.r.article_set.all(),
        [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
    # Add the same article to a different article set - check that it moves.
    self.r2.article_set.add(new_article2)
    self.assertEqual(new_article2.reporter.id, self.r2.id)
    self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
    # Adding an object of the wrong type raises TypeError.
    with transaction.atomic():
        with six.assertRaisesRegex(self, TypeError,
                                   "'Article' instance expected, got <Reporter.*"):
            self.r.article_set.add(self.r2)
    self.assertQuerysetEqual(self.r.article_set.all(),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
def test_set(self):
    """article_set.set() replaces the related set; since the FK is not
    nullable, members removed from one set must be claimed by another."""
    new_article = self.r.article_set.create(headline="John's second story",
                                            pub_date=datetime.date(2005, 7, 29))
    new_article2 = self.r2.article_set.create(headline="Paul's story",
                                              pub_date=datetime.date(2006, 1, 17))
    # Assign the article to the reporter.
    new_article2.reporter = self.r
    new_article2.save()
    self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
    self.assertEqual(new_article2.reporter.id, self.r.id)
    self.assertQuerysetEqual(self.r.article_set.all(), [
        "<Article: John's second story>",
        "<Article: Paul's story>",
        "<Article: This is a test>",
    ])
    self.assertQuerysetEqual(self.r2.article_set.all(), [])
    # Set the article back again.
    self.r2.article_set.set([new_article, new_article2])
    self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
    self.assertQuerysetEqual(self.r2.article_set.all(),
        [
            "<Article: John's second story>",
            "<Article: Paul's story>",
        ])
    # Funny case - because the ForeignKey cannot be null,
    # existing members of the set must remain.
    self.r.article_set.set([new_article])
    self.assertQuerysetEqual(self.r.article_set.all(),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
def test_assign(self):
    """Assigning a list to the reverse descriptor behaves like set(); the
    non-nullable FK also means there is no remove()/clear()."""
    new_article = self.r.article_set.create(headline="John's second story",
                                            pub_date=datetime.date(2005, 7, 29))
    new_article2 = self.r2.article_set.create(headline="Paul's story",
                                              pub_date=datetime.date(2006, 1, 17))
    # Assign the article to the reporter directly using the descriptor.
    new_article2.reporter = self.r
    new_article2.save()
    self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
    self.assertEqual(new_article2.reporter.id, self.r.id)
    self.assertQuerysetEqual(self.r.article_set.all(), [
        "<Article: John's second story>",
        "<Article: Paul's story>",
        "<Article: This is a test>",
    ])
    self.assertQuerysetEqual(self.r2.article_set.all(), [])
    # Set the article back again using set descriptor.
    self.r2.article_set = [new_article, new_article2]
    self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
    self.assertQuerysetEqual(self.r2.article_set.all(),
        [
            "<Article: John's second story>",
            "<Article: Paul's story>",
        ])
    # Funny case - assignment notation can only go so far; because the
    # ForeignKey cannot be null, existing members of the set must remain.
    self.r.article_set = [new_article]
    self.assertQuerysetEqual(self.r.article_set.all(),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
    # Reporter cannot be null - there should not be a clear or remove method
    self.assertFalse(hasattr(self.r2.article_set, 'remove'))
    self.assertFalse(hasattr(self.r2.article_set, 'clear'))
def test_assign_unsaved_check_override(self):
    """
    #24495 - Assigning an unsaved object to a ForeignKey
    should be allowed when the allow_unsaved_instance_assignment
    attribute has been set to True.
    """
    class UnsavedForeignKey(models.ForeignKey):
        # A ForeignKey which can point to an unsaved object
        allow_unsaved_instance_assignment = True

    class Band(models.Model):
        name = models.CharField(max_length=50)

    class BandMember(models.Model):
        band = UnsavedForeignKey(Band, models.CASCADE)
        first_name = models.CharField(max_length=50)
        last_name = models.CharField(max_length=50)

    beatles = Band(name='The Beatles')
    john = BandMember(first_name='John', last_name='Lennon')
    # This should not raise an exception as the ForeignKey between member
    # and band has allow_unsaved_instance_assignment=True.
    john.band = beatles
    self.assertEqual(john.band, beatles)
def test_selects(self):
    """Filtering across the ForeignKey in the forward direction: __exact,
    implied __exact, multi-hop lookups, __in with ids/instances/querysets,
    and extra() against the predictable join alias."""
    self.r.article_set.create(headline="John's second story",
                              pub_date=datetime.date(2005, 7, 29))
    self.r2.article_set.create(headline="Paul's story",
                               pub_date=datetime.date(2006, 1, 17))
    # Reporter objects have access to their related Article objects.
    self.assertQuerysetEqual(self.r.article_set.all(), [
        "<Article: John's second story>",
        "<Article: This is a test>",
    ])
    self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'),
                             ["<Article: This is a test>"])
    self.assertEqual(self.r.article_set.count(), 2)
    self.assertEqual(self.r2.article_set.count(), 1)
    # Get articles by id
    self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id),
                             ["<Article: This is a test>"])
    self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id),
                             ["<Article: This is a test>"])
    # Query on an article property
    self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'),
                             ["<Article: This is a test>"])
    # The API automatically follows relationships as far as you need.
    # Use double underscores to separate relationships.
    # This works as many levels deep as you want. There's no limit.
    # Find all Articles for any Reporter whose first name is "John".
    self.assertQuerysetEqual(Article.objects.filter(reporter__first_name__exact='John'),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    # Check that implied __exact also works
    self.assertQuerysetEqual(Article.objects.filter(reporter__first_name='John'),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    # Query twice over the related field.
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__first_name__exact='John',
                               reporter__last_name__exact='Smith'),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    # The underlying query only makes one join when a related table is referenced twice.
    queryset = Article.objects.filter(reporter__first_name__exact='John',
                                      reporter__last_name__exact='Smith')
    self.assertNumQueries(1, list, queryset)
    self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
    # The automatically joined table has a predictable name.
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__first_name__exact='John').extra(
            where=["many_to_one_reporter.last_name='Smith'"]),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    # ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
    self.assertQuerysetEqual(
        (Article.objects
         .filter(reporter__first_name__exact='John')
         .extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    # Find all Articles for a Reporter.
    # Use direct ID check, pk check, and object comparison
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__id__exact=self.r.id),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__pk=self.r.id),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(reporter=self.r.id),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(reporter=self.r),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
        [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(
        Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
        [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
    # You can also use a queryset instead of a literal list of instances.
    # The queryset must be reduced to a list of values using values(),
    # then converted into a query
    self.assertQuerysetEqual(
        Article.objects.filter(
            reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
        ).distinct(),
        [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
def test_reverse_selects(self):
    """Filtering Reporters through the reverse relation (article__...),
    including duplicate-row results and distinct()/count() interplay."""
    a3 = Article.objects.create(id=None, headline="Third article",
                                pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
    Article.objects.create(id=None, headline="Fourth article",
                           pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
    # Reporters can be queried
    self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'),
                             ["<Reporter: John Smith>"])
    # Reporters can query in opposite direction of ForeignKey definition
    self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(Reporter.objects.filter(article=self.a),
                             ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(),
        ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(),
        ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__in=[self.a, a3]).distinct(),
        ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__headline__startswith='T'),
        ["<Reporter: John Smith>", "<Reporter: John Smith>"],
        ordered=False
    )
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__headline__startswith='T').distinct(),
        ["<Reporter: John Smith>"])
    # Counting in the opposite direction works in conjunction with distinct()
    self.assertEqual(
        Reporter.objects.filter(article__headline__startswith='T').count(), 2)
    self.assertEqual(
        Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
    # Queries can go round in circles.
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__reporter__first_name__startswith='John'),
        [
            "<Reporter: John Smith>",
            "<Reporter: John Smith>",
            "<Reporter: John Smith>",
        ],
        ordered=False
    )
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
        ["<Reporter: John Smith>"])
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
        ["<Reporter: John Smith>"])
    # Check that implied __exact also works.
    self.assertQuerysetEqual(
        Reporter.objects.filter(article__reporter=self.r).distinct(),
        ["<Reporter: John Smith>"])
    # It's possible to use values() calls across many-to-one relations.
    # (Note, too, that we clear the ordering here so as not to drag the
    # 'headline' field into the columns being used to determine uniqueness)
    d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
    self.assertEqual([d],
        list(Article.objects.filter(reporter=self.r).distinct().order_by()
             .values('reporter__first_name', 'reporter__last_name')))
def test_select_related(self):
    """select_related().dates() stays correct when several Articles share a
    date but point at different Reporters."""
    # Check that Article.objects.select_related().dates() works properly when
    # there are multiple Articles with the same date but different foreign-key
    # objects (Reporters).
    r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='royko@suntimes.com')
    r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='jkass@tribune.com')
    Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
    Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
    self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'day')),
        [
            datetime.date(1980, 4, 23),
            datetime.date(2005, 7, 27),
        ])
    self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'month')),
        [
            datetime.date(1980, 4, 1),
            datetime.date(2005, 7, 1),
        ])
    self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'year')),
        [
            datetime.date(1980, 1, 1),
            datetime.date(2005, 1, 1),
        ])
def test_delete(self):
    """Deleting a reporter cascades to its articles; delete() also works
    on a queryset filtered through a JOIN."""
    self.r.article_set.create(headline="John's second story",
                              pub_date=datetime.date(2005, 7, 29))
    self.r2.article_set.create(headline="Paul's story",
                               pub_date=datetime.date(2006, 1, 17))
    Article.objects.create(id=None, headline="Third article",
                           pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
    Article.objects.create(id=None, headline="Fourth article",
                           pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
    # If you delete a reporter, his articles will be deleted.
    self.assertQuerysetEqual(Article.objects.all(),
        [
            "<Article: Fourth article>",
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: Third article>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
        [
            "<Reporter: John Smith>",
            "<Reporter: Paul Jones>",
        ])
    self.r2.delete()
    self.assertQuerysetEqual(Article.objects.all(),
        [
            "<Article: Fourth article>",
            "<Article: John's second story>",
            "<Article: Third article>",
            "<Article: This is a test>",
        ])
    self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
                             ["<Reporter: John Smith>"])
    # You can delete using a JOIN in the query.
    Reporter.objects.filter(article__headline__startswith='This').delete()
    self.assertQuerysetEqual(Reporter.objects.all(), [])
    self.assertQuerysetEqual(Article.objects.all(), [])
def test_explicit_fk(self):
    """get_or_create()/create()/get() accept and respect an explicit
    reporter_id value in place of a Reporter instance."""
    # Create a new Article with get_or_create using an explicit value
    # for a ForeignKey.
    a2, created = Article.objects.get_or_create(id=None,
                                                headline="John's second test",
                                                pub_date=datetime.date(2011, 5, 7),
                                                reporter_id=self.r.id)
    self.assertTrue(created)
    self.assertEqual(a2.reporter.id, self.r.id)
    # You can specify filters containing the explicit FK value.
    self.assertQuerysetEqual(
        Article.objects.filter(reporter_id__exact=self.r.id),
        [
            "<Article: John's second test>",
            "<Article: This is a test>",
        ])
    # Create an Article by Paul for the same date.
    a3 = Article.objects.create(id=None, headline="Paul's commentary",
                                pub_date=datetime.date(2011, 5, 7),
                                reporter_id=self.r2.id)
    self.assertEqual(a3.reporter.id, self.r2.id)
    # Get should respect explicit foreign keys as well.
    self.assertRaises(MultipleObjectsReturned,
                      Article.objects.get, reporter_id=self.r.id)
    self.assertEqual(repr(a3),
                     repr(Article.objects.get(reporter_id=self.r2.id,
                          pub_date=datetime.date(2011, 5, 7))))
def test_deepcopy_and_circular_references(self):
    """Regression for #12876 -- model methods that include queries that
    recurse don't cause recursion depth problems under deepcopy."""
    # The attached queryset refers back to the reporter itself.
    self.r.cached_query = Article.objects.filter(reporter=self.r)
    self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
def test_manager_class_caching(self):
    """The dynamically built related-manager class is created once and
    then reused for every instance of the model."""
    reporter_a = Reporter.objects.create(first_name='Mike')
    reporter_b = Reporter.objects.create(first_name='John')
    # Repeated access on the same instance yields the same manager class.
    self.assertIs(reporter_a.article_set.__class__, reporter_a.article_set.__class__)
    # Distinct instances share that manager class as well.
    self.assertIs(reporter_a.article_set.__class__, reporter_b.article_set.__class__)
def test_create_relation_with_ugettext_lazy(self):
    """A lazy translation string passed to create() is stored and read
    back as its evaluated text."""
    reporter = Reporter.objects.create(first_name='John',
                                       last_name='Smith',
                                       email='john.smith@example.com')
    lazy = ugettext_lazy('test')
    reporter.article_set.create(headline=lazy,
                                pub_date=datetime.date(2011, 6, 10))
    # Force the lazy object to its concrete text for comparison.
    notlazy = six.text_type(lazy)
    article = reporter.article_set.get()
    self.assertEqual(article.headline, notlazy)
def test_values_list_exception(self):
    """values_list() with an unknown field raises FieldError naming the
    available choices, including extra() select aliases."""
    expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
    self.assertRaisesMessage(FieldError,
        expected_message % ', '.join(sorted(f.name for f in Reporter._meta.get_fields())),
        Article.objects.values_list,
        'reporter__notafield')
    self.assertRaisesMessage(FieldError,
        expected_message % ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields())),
        Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list,
        'notafield')
def test_fk_assignment_and_related_object_cache(self):
    """Tests of ForeignKey assignment and the related-object cache (see #6886)."""
    p = Parent.objects.create(name="Parent")
    c = Child.objects.create(name="Child", parent=p)
    # Look up the object again so that we get a "fresh" object.
    c = Child.objects.get(name="Child")
    p = c.parent
    # Accessing the related object again returns the exactly same object.
    self.assertIs(c.parent, p)
    # But if we kill the cache, we get a new object.
    del c._parent_cache
    self.assertIsNot(c.parent, p)
    # Assigning a new object results in that object getting cached immediately.
    p2 = Parent.objects.create(name="Parent 2")
    c.parent = p2
    self.assertIs(c.parent, p2)
    # Assigning None succeeds if field is null=True.
    p.bestchild = None
    self.assertIsNone(p.bestchild)
    # bestchild should still be None after saving.
    p.save()
    self.assertIsNone(p.bestchild)
    # bestchild should still be None after fetching the object again.
    p = Parent.objects.get(name="Parent")
    self.assertIsNone(p.bestchild)
    # Assigning None fails: Child.parent is null=False.
    self.assertRaises(ValueError, setattr, c, "parent", None)
    # You also can't assign an object of the wrong type here
    self.assertRaises(ValueError, setattr, c, "parent", First(id=1, second=1))
    # Nor can you explicitly assign None to Child.parent during object
    # creation (regression for #9649).
    self.assertRaises(ValueError, Child, name='xyzzy', parent=None)
    self.assertRaises(ValueError, Child.objects.create, name='xyzzy', parent=None)
    # Creation using keyword argument should cache the related object.
    p = Parent.objects.get(name="Parent")
    c = Child(parent=p)
    self.assertIs(c.parent, p)
    # Creation using keyword argument and unsaved related instance (#8070).
    p = Parent()
    with self.assertRaisesMessage(ValueError,
            'Cannot assign "%r": "%s" instance isn\'t saved in the database.'
            % (p, Child.parent.field.remote_field.model._meta.object_name)):
        Child(parent=p)
    with self.assertRaisesMessage(ValueError,
            'Cannot assign "%r": "%s" instance isn\'t saved in the database.'
            % (p, Child.parent.field.remote_field.model._meta.object_name)):
        ToFieldChild(parent=p)
    # Creation using attname keyword argument and an id will cause the
    # related object to be fetched.
    p = Parent.objects.get(name="Parent")
    c = Child(parent_id=p.id)
    self.assertIsNot(c.parent, p)
    self.assertEqual(c.parent, p)
def test_multiple_foreignkeys(self):
    """Test of multiple ForeignKeys to the same model (bug #7125)."""
    c1 = Category.objects.create(name='First')
    c2 = Category.objects.create(name='Second')
    c3 = Category.objects.create(name='Third')
    r1 = Record.objects.create(category=c1)
    r2 = Record.objects.create(category=c1)
    r3 = Record.objects.create(category=c2)
    r4 = Record.objects.create(category=c2)
    r5 = Record.objects.create(category=c3)
    Relation.objects.create(left=r1, right=r2)
    Relation.objects.create(left=r3, right=r4)
    Relation.objects.create(left=r1, right=r3)
    Relation.objects.create(left=r5, right=r2)
    Relation.objects.create(left=r3, right=r2)
    # Filter through both FKs of Relation at once.
    q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
    self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
    q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
    self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
    # A Child cannot serve as another Child's parent (wrong target model).
    p = Parent.objects.create(name="Parent")
    c = Child.objects.create(name="Child", parent=p)
    self.assertRaises(ValueError, Child.objects.create, name="Grandchild", parent=c)
def test_fk_instantiation_outside_model(self):
    """Regression for #12190 -- a ForeignKey built outside of a model
    class can still be interrogated for its related field."""
    field = models.ForeignKey(Category, models.CASCADE)
    related_field = field.remote_field.get_related_field()
    self.assertEqual('id', related_field.name)
def test_relation_unsaved(self):
    """The <field>_set manager does not join on NULL value fields (#17541)."""
    Third.objects.create(name='Third 1')
    Third.objects.create(name='Third 2')
    th = Third(name="testing")
    # The object isn't saved and thus the relation field is null - we won't even
    # execute a query in this case.
    with self.assertNumQueries(0):
        self.assertEqual(th.child_set.count(), 0)
    th.save()
    # Now the model is saved, so we will need to execute a query.
    with self.assertNumQueries(1):
        self.assertEqual(th.child_set.count(), 0)
def test_related_object(self):
    public_school = School.objects.create(is_public=True)
    public_student = Student.objects.create(school=public_school)
    private_school = School.objects.create(is_public=False)
    private_student = Student.objects.create(school=private_school)
    # Only one school is available via all() due to the custom default manager.
    self.assertQuerysetEqual(
        School.objects.all(),
        ["<School: School object>"]
    )
    self.assertEqual(public_student.school, public_school)
    # Make sure the base manager is used so that a student can still access
    # its related school even if the default manager doesn't normally
    # allow it.
    self.assertEqual(private_student.school, private_school)
    # If the manager is marked "use_for_related_fields", it'll get used instead
    # of the "bare" queryset. Usually you'd define this as a property on the class,
    # but this approximates that in a way that's easier in tests.
    School.objects.use_for_related_fields = True
    try:
        private_student = Student.objects.get(pk=private_student.pk)
        self.assertRaises(School.DoesNotExist, lambda: private_student.school)
    finally:
        # Always restore the manager flag so other tests are unaffected.
        School.objects.use_for_related_fields = False
def test_hasattr_related_object(self):
    # The exception raised on attribute access when a related object
    # doesn't exist should be an instance of a subclass of `AttributeError`,
    # so that hasattr() returns False instead of propagating. Refs #21563.
    self.assertFalse(hasattr(Article(), 'reporter'))
| {
"content_hash": "69f8a0fdea4b93ccefac362fd95da7f9",
"timestamp": "",
"source": "github",
"line_count": 663,
"max_line_length": 126,
"avg_line_length": 47.18401206636501,
"alnum_prop": 0.5820413643192789,
"repo_name": "marcelocure/django",
"id": "7b2a640b2bace7f6b123173d9eab199f7a15ea74",
"size": "31283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/many_to_one/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48399"
},
{
"name": "HTML",
"bytes": "172916"
},
{
"name": "JavaScript",
"bytes": "247734"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11193968"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
'''
tests/task

Unit tests for the task module.
'''
| {
"content_hash": "92a9428193beae53d1ae52c9ac443da5",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 26,
"avg_line_length": 11.5,
"alnum_prop": 0.6521739130434783,
"repo_name": "sublime-ycmd/sublime-ycmd",
"id": "d79d581f4ece543d02b5c858dd5d411aa925e177",
"size": "70",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/task/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "315808"
}
],
"symlink_target": ""
} |
"""
This is a password generation and encryption tool.
This Software is provided as is without any warranty. In no case should the Author be liable for the Software.
Tim Hartmann , November 2015
"""
import random , os , binascii , base64 , getpass , hashlib
from tkinter import Tk
from Crypto.Cipher import AES
#https://code.activestate.com/recipes/410692/
class switch(object):
    """Readable switch/case construct driven by a for-loop.

    Usage:
        for case in switch(x):
            if case(value): ...     # enter when x matches value
            if case():              # <- matches if no other value matched
    """
    def __init__(self, value):
        self.value = value
        self.fall = False  # set once a case matched, to allow fall-through

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # BUG FIX: `raise StopIteration` inside a generator is turned into
        # RuntimeError since Python 3.7 (PEP 479); a plain return ends the
        # iteration correctly on all versions.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            # No arguments means the default case; fall-through also matches.
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
class DataModule:
    """One stored credential: account name, password and the website it belongs to."""

    def __init__(self, name, password, website):
        self.name = name
        self.password = password
        self.website = website
def encrypt(data):
    """AES-encrypt *data* with the module-level cipher and return it base64-encoded."""
    return EncodeAES(cipher, data)
def decrypt(data):
    """Base64-decode *data* and AES-decrypt it with the module-level cipher."""
    return DecodeAES(cipher, data)
def loadPwFile():
    """Load and decrypt the password store (pws.encr) into the global `data` list.

    Returns 0 (loading nothing) when the store file is empty. Entries are
    serialized as name#password#website records joined by '$', with a
    trailing '$' after the last record (see savePwFile).
    """
    if os.stat("pws.encr").st_size == 0:
        return 0
    # FIX: the file handle was previously never closed; a context manager
    # guarantees it is released even if decryption fails.
    with open("pws.encr", "rt") as store:
        dt = store.readline()
    dt = decrypt(dt)
    records = dt.split("$")
    del records[-1]  # drop the empty chunk produced by the trailing '$'
    for record in records:
        fields = record.split("#")
        fields[2] = fields[2].replace("\n", "")
        data.append(DataModule(fields[0], fields[1], fields[2]))
def savePwFile():
    """Encrypt the in-memory `data` entries and write them to pws.encr.

    Each entry is serialized as name#password#website and terminated by '$'
    (the format loadPwFile expects).
    """
    # str.join avoids the quadratic += concatenation of the original loop.
    blob = "".join("%s#%s#%s$" % (entry.name, entry.password, entry.website)
                   for entry in data)
    encrypted = encrypt(blob).decode('utf-8')
    # FIX: the file handle was previously never closed (risking an
    # unflushed, truncated store); the context manager closes and flushes.
    with open("pws.encr", "wt") as store:
        store.write(encrypted)
#def loadSettings():
def genPassword():
    """Generate a pseudo-random password of PASSWORD_LENGHT characters.

    Draws entropy from os.urandom: x (8 bytes) seeds a sequence of indices
    into CHARS, y (5 bytes) is the per-step decrement, and c (2 bytes) is a
    random offset into the generated index list.

    NOTE(review): if the while-loop exits after fewer than
    c + PASSWORD_LENGHT iterations, ps[c+i] raises IndexError. The 100000
    iteration cap usually makes ps long enough, but that is not guaranteed
    -- confirm before relying on it.
    """
    x = int(binascii.hexlify(os.urandom(8)).decode('utf-8'),16)
    y = int(binascii.hexlify(os.urandom(5)).decode('utf-8'),16)
    c = int(binascii.hexlify(os.urandom(2)).decode('utf-8'),16)
    ps = []
    pw = ""
    i = 0
    # Build a long list of candidate character indices by repeatedly
    # reducing x by the random step y.
    while x > len(CHARS):
        i += 1
        ps.append(x % len(CHARS))
        x -= y
        if i > 100000:
            break
    # Pick PASSWORD_LENGHT consecutive indices starting at random offset c.
    for i in range(PASSWORD_LENGHT):
        pw += str(CHARS[ps[c+i]])
    return pw
def addNewEntryG(name,website):
    # Add an entry with a freshly generated random password for this website.
    data.append(DataModule(name,genPassword(),website))
def addNewEntryM(name,password,website):
    # Add an entry using the password supplied by the user.
    data.append(DataModule(name,password,website))
# --- Master password -------------------------------------------------------
# The SHA-256 hex digest of the master password supplies the AES key material.
GlobPasswd = getpass.getpass("Enter your Master-Password(16 character minimum recomended):")
GlobPasswd = hashlib.sha256(GlobPasswd.encode('utf-8')).hexdigest()
#Encryption Setup
BLOCK_SIZE = 32
PADDING = '{'
# Pad plaintext up to a BLOCK_SIZE multiple with the PADDING character.
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
EncodeAES = lambda c , s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c , e: c.decrypt(base64.b64decode(e)).decode('utf-8').rstrip(PADDING)
# BUG FIX: `GlobPasswd.encode[0:32]` subscripted the bound method object and
# raised TypeError at startup; the method must be called and the resulting
# bytes sliced to a 32-byte AES-256 key.
secret = GlobPasswd.encode('utf-8')[0:32]
# NOTE(review): AES.new() without an explicit mode relies on the legacy
# PyCrypto default; modern pycryptodome requires a mode argument -- confirm
# which library is installed.
cipher = AES.new(secret)
#Password-Generation Setup
PASSWORD_LENGHT = 50#Safer Config random.randint(50,53) -> more password-lenghts to bruteforce | not needed
CHARS = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','0','1','2','3','4','5','6','7','8','9','!','.',',','?','*','&','"','+','-']
#Tkinter Setup
r = Tk()
r.withdraw()  # hide the root window; Tk is only used for clipboard access
data = []
loadPwFile()
run = True
# Interactive command loop: read a command, dispatch it through the
# switch/case recipe defined above, and persist the store on exit.
while run:
    choice = input(">")
    for case in switch(choice):
        if case("ls") or case("list"):
            # Print every stored entry as "<index>. >name : password : website".
            for i in range(len(data)):
                print(str(i)+". >"+data[i].name+" : "+data[i].password+" : "+data[i].website)
        elif case("cp website"):
            choice = int(input("Index >>"))
            if choice >= 0 and choice < len(data):
                # Copy the selected website to the system clipboard via Tk.
                r.withdraw()
                r.clipboard_clear()
                r.clipboard_append(data[choice].website)
            else:
                print("Index out of bounds!")
        elif case("cp password"):
            choice = int(input("Index >>"))
            if choice >= 0 and choice < len(data):
                # Copy the selected password to the system clipboard via Tk.
                r.withdraw()
                r.clipboard_clear()
                r.clipboard_append(data[choice].password)
            else:
                print("Index out of bounds!")
        elif case("cp name"):
            choice = int(input("Index >>"))
            if choice >= 0 and choice< len(data):
                # Copy the selected account name to the system clipboard via Tk.
                r.withdraw()
                r.clipboard_clear()
                r.clipboard_append(data[choice].name)
            else:
                print("Index out of bounds!")
        elif case("add new"):
            # Add an entry with a generated password.
            name = input("Username >>")
            website = input("Website >>")
            addNewEntryG(name,website)
        elif case("add manual"):
            # Add an entry with a user-supplied password.
            name = input("Username >>")
            password = input("Password >>")
            website = input("Website >>")
            addNewEntryM(name,password,website)
        elif case("rm"):
            index = input("Index >>")
            confirm = input("Are you sure you want to delete the Entry at Index "+str(index)+" ?(y/n)")
            if confirm == "y":
                del data[int(index)]
        elif case("exit") or case("close"):
            run = False
        elif case("edit"):
            # Edit a single field of an existing entry.
            index = input("Index >>")
            c = input("Name, Password, Website(n/p/w)>>")
            new = input("New Value >>")
            if c == "n":
                data[int(index)].name = new
            elif c == "p":
                data[int(index)].password = new
            elif c == "w":
                data[int(index)].website = new
            else:
                pass
        elif case("regen"):
            # Replace the stored password with a newly generated one.
            index = input("Index >>")
            data[int(index)].password = genPassword()
        else:
            print("Invalid option!")
# Persist any changes before exiting.
savePwFile()
| {
"content_hash": "5006f9ef2ebfa1b29a0276d8651dc71e",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 293,
"avg_line_length": 29.472826086956523,
"alnum_prop": 0.6107320671215195,
"repo_name": "TimseineLPs/PasswMngr",
"id": "94d3eec314701322d7a4620237c1993ed4d7aa7d",
"size": "5423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "textVersion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13567"
}
],
"symlink_target": ""
} |
from difflib import SequenceMatcher
from django.forms import HiddenInput
from django.shortcuts import get_object_or_404
from form_utils.forms import BetterForm
from server.base import SUBSTITUTIONS
from .datacollection.pluginmanagementinterface import PluginManagementInterface
from .models import Plugin, Argument, ExecutionHistory, PluginExecution
from django import forms
from .mongohandler import handler
class ProjectForm(forms.Form):
    """Form for selecting the plugins to run on a project."""
    # Idiom fix: `.filter()` already operates on the full queryset, so the
    # redundant `.all()` call was dropped.
    plugins = forms.ModelMultipleChoiceField(queryset=Plugin.objects.filter(active=True, installed=True))
class SparkSubmitForm(forms.Form):
    """Form for submitting a Spark job (jar/python file, entry class, arguments)."""
    # Templates used by django-progressbarupload for the admin change views.
    change_form_template = 'progressbarupload/change_form.html'
    add_form_template = 'progressbarupload/change_form.html'
    file = forms.FileField(label='Jar / Python File')
    class_name = forms.CharField(label='Fully Qualified Class Name', max_length=300, required=False)
    arguments = forms.CharField(label='Arguments', max_length=1000, required=False)
def set_argument_values(form_data):
    """Persist install values from submitted form data onto their Argument rows.

    Field ids look like "<plugin_id>_argument_<argument_id>"; any other
    form field is ignored.
    """
    for field_id, field_value in form_data.items():
        if "argument" not in field_id:
            continue
        argument_pk = field_id.split("_")[2]
        argument = get_object_or_404(Argument, pk=argument_pk)
        argument.install_value = field_value
        argument.save()
def set_argument_execution_values(form_data, plugin_executions):
    """Store per-execution argument values from submitted form data.

    Field ids look like "<plugin_id>_argument_<argument_id>". Each value is
    saved as an ExecutionHistory row attached to the matching plugin
    execution.

    Raises ValueError if a field references a plugin id for which no plugin
    execution was supplied. (Previously this either raised a confusing
    NameError or -- worse -- silently reused the match from an earlier loop
    iteration.)
    """
    for id_string, value in form_data.items():
        if "argument" not in id_string:
            continue
        parts = id_string.split("_")
        plugin_id = parts[0]
        argument_id = parts[2]
        # Find the execution belonging to this field's plugin; stop at the
        # first match instead of scanning the whole list.
        found_plugin_execution = None
        for plugin_execution in plugin_executions:
            if plugin_execution.plugin.id == int(plugin_id):
                found_plugin_execution = plugin_execution
                break
        if found_plugin_execution is None:
            raise ValueError("No plugin execution found for plugin id %s" % plugin_id)
        exe = ExecutionHistory(execution_argument=get_object_or_404(Argument, pk=argument_id),
                               plugin_execution=found_plugin_execution,
                               execution_value=value)
        exe.save()
def get_form(plugins, post, type, project=None, initial_revisions=None, initial_exec_type=None):
    """Build a dynamic plugin form bound to *post*.

    :param plugins: plugins to create argument fields for.
    :param post: the POST data the returned form is bound to.
    :param type: which argument set to expose (e.g. 'install' or 'execute').
    :param project: optional project whose VCS URL prefills repository fields.
    :param initial_revisions: initial content of the revisions textarea.
    :param initial_exec_type: initially selected execution option.
    :return: a bound instance of a dynamically created BetterForm subclass.
    """
    created_fieldsets = []
    plugin_fields = {}
    EXEC_OPTIONS = (('all', 'Execute on all revisions'), ('error', 'Execute on all revisions with errors'),
                    ('new', 'Execute on new revisions'), ('rev', 'Execute on following revisions:'), ('ver', 'Execute on all revisions where verification failed for one Plugin'))
    # we need to get the correct pluginmanager for this information because that depends on selected queue
    interface = PluginManagementInterface.find_correct_plugin_manager()
    cores_per_job = interface.default_cores_per_job()
    queue = interface.default_queue()
    added_fields = []
    # BUG FIX: vcs_url was previously initialised only inside the
    # `type == 'execute'` branch, but it is read in the per-argument loop
    # below for every form type, which raised a NameError on non-execute
    # forms as soon as a plugin declared a 'repository_url' argument.
    vcs_url = ''
    if type == 'execute':
        if project:
            vcs_url = handler.get_vcs_url_for_project_id(project.mongo_id)
        # Add fields if there are plugins that work on revision level
        rev_plugins = [plugin for plugin in plugins if plugin.plugin_type == 'rev']
        if len(rev_plugins) > 0:
            plugin_fields['execution'] = forms.ChoiceField(widget=forms.RadioSelect, choices=EXEC_OPTIONS, initial=initial_exec_type)
            plugin_fields['revisions'] = forms.CharField(label='Revisions (comma-separated)', required=False, initial=initial_revisions, widget=forms.Textarea)
            added_fields.append('execution')
            added_fields.append('revisions')
        repo_plugins = [plugin for plugin in plugins if plugin.plugin_type == 'repo']
        # If we have revision or repository plugins, we need to ask for the repository to use
        if len(rev_plugins) > 0 or len(repo_plugins) > 0:
            plugin_fields['repository_url'] = forms.CharField(label='Repository URL', required=True, initial=vcs_url)
            added_fields.append('repository_url')
        plugin_fields['queue'] = forms.CharField(label='Default job queue', initial=queue, required=False)
        added_fields.append('queue')
        plugin_fields['cores_per_job'] = forms.IntegerField(label='Cores per job (HPC only)', initial=cores_per_job, required=False)
        added_fields.append('cores_per_job')
        created_fieldsets.append(['Basis Configuration', {'fields': added_fields}])
    # Create lists for the fieldsets and a list for the fields of the form
    for plugin in plugins:
        arguments = []
        for argument in plugin.argument_set.all().filter(type=type):
            identifier = '%s_argument_%s' % (plugin.id, argument.id)
            arguments.append(identifier)
            initial = None
            # Prefill well-known arguments by fuzzy-matching their names
            # against the globally configured substitutions.
            for name, value in SUBSTITUTIONS.items():
                if SequenceMatcher(None, argument.name, name).ratio() > 0.8:
                    initial = value['name']
            if argument.name == 'repository_url':
                initial = vcs_url
            plugin_fields[identifier] = forms.CharField(label=argument.name, required=argument.required,
                                                        initial=initial, help_text=argument.description)
        created_fieldsets.append([str(plugin), {'fields': arguments}])
    # Dynamically created pluginform
    class PluginForm(BetterForm):
        class Meta:
            fieldsets = created_fieldsets
        def __init__(self, *args, **kwargs):
            super(PluginForm, self).__init__(*args, **kwargs)
            self.fields = plugin_fields
    return PluginForm(post)
| {
"content_hash": "a482b4374fb27a0165d16107436281b6",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 182,
"avg_line_length": 46.295081967213115,
"alnum_prop": 0.6281869688385269,
"repo_name": "smartshark/serverSHARK",
"id": "fbf4e74f60f2629eb8be8cca26802045f43e39e8",
"size": "5648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartshark/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "66094"
},
{
"name": "HTML",
"bytes": "40562"
},
{
"name": "JavaScript",
"bytes": "1056"
},
{
"name": "Jinja",
"bytes": "4515"
},
{
"name": "Less",
"bytes": "78481"
},
{
"name": "Python",
"bytes": "267732"
},
{
"name": "Ruby",
"bytes": "5037"
},
{
"name": "SCSS",
"bytes": "79489"
}
],
"symlink_target": ""
} |
from flask_restful import Resource
class DistributorDetail(Resource):
    """REST resource for a single distributor (stub implementation)."""

    def get(self, _id):
        return {_id: "1"}, 200

    def put(self, _id):
        return {_id: "2"}, 200

    def delete(self, _id):
        # BUG FIX: the original returned the set {self, _id}, which is not
        # JSON-serializable and leaked the resource instance into the
        # response; return a dict payload consistent with get/put.
        return {_id: "3"}, 200
class DistributorList(Resource):
    """REST resource for the distributor collection (stub implementation)."""
    def get(self):
        return {}, 200
    def post(self):
        return {'task': 'Hello world'}, 201
"content_hash": "011c9e5fe3f596beae0a5cf71a60837c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 43,
"avg_line_length": 19.2,
"alnum_prop": 0.5703125,
"repo_name": "ianjuma/sunpower",
"id": "aa5093c423e211e43540ed896cd4140b03c0def1",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributors/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29972"
},
{
"name": "Shell",
"bytes": "1661"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HNeg_SchoolOrdFac_CompleteLHS(HimesisPreConditionPatternLHS):
    # Auto-generated Himesis pre-condition pattern (AToM3/SyVOLT). The
    # embedded triple-quoted strings are code templates evaluated later by
    # the transformation engine, not by this module at import time.
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HNeg_SchoolOrdFac_CompleteLHS.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HNeg_SchoolOrdFac_CompleteLHS, self).__init__(name='HNeg_SchoolOrdFac_CompleteLHS', num_nodes=0, edges=[])
        # Set the graph attributes
        self["mm__"] = []
        self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Neg_SchoolOrdFac')
        # Nodes that represent match classes
        # match class School() node
        self.add_node()
        self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[0]["MT_label__"] = """1"""
        self.vs[0]["MT_dirty__"] = False
        self.vs[0]["mm__"] = """MT_pre__School"""
        self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
        #Nodes that represent apply classes
        # match class OrdinaryFacility() node
        self.add_node()
        self.vs[1]["MT_subtypeMatching__"] = False
        self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[1]["MT_label__"] = """2"""
        self.vs[1]["MT_subtypes__"] = []
        self.vs[1]["MT_dirty__"] = False
        self.vs[1]["mm__"] = """MT_pre__OrdinaryFacility"""
        self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
        # Nodes that represent the match associations of the property.
        # Nodes that represent the apply associations of the property.
        # Nodes that represent trace relations
        # backward association School---->OrdinaryFacility node
        self.add_node()
        self.vs[2]["MT_subtypeMatching__"] = False
        self.vs[2]["MT_label__"] = """3"""
        self.vs[2]["MT_subtypes__"] = []
        self.vs[2]["MT_dirty__"] = False
        self.vs[2]["mm__"] = """MT_pre__trace_link"""
        self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'blink2')
        # Add the edges
        self.add_edges([
            (1,2), # apply_class OrdinaryFacility() -> backward_association
            (2,0), # backward_association -> apply_class School()
        ])
        # Add the attribute equations
        self["equations"] = []

    def eval_attr11(self, attr_value, this):
        # Per-node attribute constraint for match label 1 (School).
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def eval_attr12(self, attr_value, this):
        # Per-node attribute constraint for match label 2 (OrdinaryFacility).
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def constraint(self, PreNode, graph):
        """
        Executable constraint code.
        @param PreNode: Function taking an integer as parameter
        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the nodes in the LHS have been matched.
        # You can access a matched node labelled n by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression:
        # returning True enables the rule to be applied,
        # returning False forbids the rule from being applied.
        #===============================================================================
        return True
| {
"content_hash": "e9f506643ae0a7dde95ab4525744561b",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 128,
"avg_line_length": 48.24666666666667,
"alnum_prop": 0.4572336603565013,
"repo_name": "levilucio/SyVOLT",
"id": "60a4ab75c9864f13ebe3a241b906b7c8317ca12a",
"size": "7237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ExFamToPerson/contracts/HNeg_SchoolOrdFac_CompleteLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from xml.parsers import expat
from os.path import sep as path_sep
# ------------------------
# Django dependency Import
# ------------------------
from django.conf import settings
# Root of the tree where parsed source files are stored (Django MEDIA_ROOT).
SOURCE_ROOT = settings.MEDIA_ROOT
class ParsedTreeNode(object):
    """
    A parsed XML Element Node.
    This class embeds all the information associated to a XML node
    and exposes them (via properties and methods) in a more OOP friendly way.
    """
    def __init__(self, name, attributes):
        """
        Saves the Tag name and attributes dictionary
        """
        self.name = name
        self.attributes = attributes
        # Reference ID - self assigned during parsing
        self._id = -1
        # Initialize children list to empty list
        self.children = list()
        self.parent = None  # Reference to the parent node
        self._ref_filename = None
        self._ref_filepath = None
        self._repr_name = None  # Representation name for the node (see code_label property)
        self._start_line = None
        self._end_line = None
        self._instruction_class = None
        self._instruction = None

    def add_child(self, element):
        """Append *element* to this node's children (parent link NOT set here)."""
        self.children.append(element)

    def get_attribute(self, attr_name):
        """
        Returns the value associated to the input
        *attr_name* if it exists, None instead.
        """
        if attr_name in self.attributes:
            return self.attributes.get(attr_name)
        return None

    def get_elements(self, name=''):
        """
        If *name* is not None or empty string, this function
        returns all the children whose name matches with the *name*
        (as a lazy `filter` object under Python 3).
        Conversely, the entire list of children is returned.
        """
        if name:
            return filter(lambda node: node.name == name, self.children)
        else:
            return self.children

    @staticmethod
    def from_xml_to_unicode(value):
        """
        Sanitize the input _value_ in a Unicode-compliant format.

        FIX: the XML entity patterns had been mangled (already-decoded
        characters were used as search strings, making the replacements
        no-ops); the standard XML entities are restored here.
        """
        value = str(value).strip()
        value = value.replace('&apos;', '\'')
        value = value.replace('&quot;', '"')
        value = value.replace('&gt;', '>')
        value = value.replace('&lt;', '<')
        value = value.replace('&amp;', '&')
        return value

    @staticmethod
    def from_unicode_to_xml(xml_line):
        """
        Sanitize the input _value_ in a XML-compliant format.
        Only the value of the `name="..."` attribute (if present) is escaped.
        """
        if xml_line.find("name=") != -1:
            value = xml_line[xml_line.find("name=")+len('name="'):xml_line.find('" line')]
            # '&' is escaped first so it does not re-escape the other entities.
            value = value.replace('&', '&amp;')
            value = value.replace('>', '&gt;')
            value = value.replace('<', '&lt;')
            value = value.replace("'", '&apos;')
            value = value.replace('"', '&quot;')
            new_xml_line = xml_line[:xml_line.find("name=")+len('name="')] + value + \
                xml_line[xml_line.find('" line'):]
            return new_xml_line
        else:
            return xml_line

    def repr_in_xml(self, indent=''):
        """
        Represent the tree rooted in self as a
        XML tree.
        """
        tagline = '%s<%s name="%s" line="%d" instruction_class="%s" instruction="%s">\n' % (
            indent, self.name, self.label, self.line_number, self.instruction_class,
            self.instruction)
        if self.is_leaf_node:
            tagline += '</%s>' % self.name
            return tagline
        for child in self.children_nodes:
            tagline += child.repr_in_xml(indent=indent + " ")
        tagline += indent + "</%s>\n" % self.name
        return tagline

    def to_json(self):
        """Serialize the tree rooted in self to a JSON-compatible dict."""
        json_repr = {
            'name': self.name,
            'label': self.label,
            'node_type': self.instruction_class,
            'instruction': self.instruction,
            'id': self.node_id,
            'start_line': self.startline,
            'end_line': self.endline,
            'filepath': self.src_filepath,
            'filename': self.src_filename
        }
        children = []
        for c in self.children:
            cn_json = c.to_json()
            children.append(cn_json)
        json_repr['children'] = children
        return json_repr

    @classmethod
    def from_json(cls, json):
        """Rebuild a ParsedTreeNode tree from its `to_json` representation."""
        node = cls(json['name'], {
            'name': json['label']
        })
        node.src_filepath = json['filepath']
        node.src_filename = json['filename']
        node.node_id = json['id']
        node.instruction = json['instruction']
        # FIX: the original assigned `node.node_type`, a dead attribute no
        # property ever reads; `to_json` stores `instruction_class` under the
        # 'node_type' key, so restore it there for a faithful round-trip.
        node.instruction_class = json['node_type']
        node.startline = json['start_line']
        node.endline = json['end_line']
        node.children = [cls.from_json(c) for c in json['children']]
        return node

    def as_ptb(self):
        """
        Represent the tree (rooted in self) in the Penn Tree Bank (PTB) format
        """
        # TODO: Change recursive implementation into an iterative one
        if self.is_leaf_node:
            label = self.label.replace('(', 'LBR').replace(')', 'RBR')
            return '(%s %s)' % (self.instruction, label)
        ptb = '(%s ' % self.instruction
        for cn in self.children_nodes:
            ptb += cn.as_ptb()
        ptb += ')'
        return ptb

    def __repr__(self):
        if self.src_filename:
            return "[%s] %s - (%d, %d)" % (self.src_filename, self.code_label,
                                           self.startline, self.endline)
        else:
            return "%s - (%d, %d)" % (self.code_label, self.startline, self.endline)

    def __str__(self):
        return str(repr(self))

    def __hash__(self):
        """
        So far, the hash key depends on the machine where
        the parsing took place as it is based on the `_ref_filepath` attribute.
        In the generale case, this doesn't matter as it is just a way to encode
        the `ParsedTreeNode` in a Python dictionary
        (see `Xml2ObjectParser.encode_node`).
        """
        # FIXME: There's a known Bug affecting this hashing strategy!
        # If we encode the same file at two different granularity levels
        # (e.g., ClassLevel and Method Level), there will be no correspondence
        # between the hashing value of the *same* nodes. For example, if we perform a
        # MethodLevel parsing, the value of the hashing of upper level *class nodes*
        # (i.e., `node.is_class == True`) will be different from those
        # calculated for the *same* nodes instances performing a ClassLevel parsing.
        # This is because so far the hashing function depends on attributes whose
        # value change if we call the hash during and/or after the parsing of the whole
        # file has been completed
        # (namely `self.startline` and `self.endline`). In fact, in the former case,
        # the values of these attributes will be equal to 0 as no child has been added yet.
        return hash('%s_%d_%d_%d' % (self.code_label, self.startline, self.endline,
                                     len(self.children)))

    # ===== Properties =====
    @property
    def children_nodes(self):
        for cn in self.children:
            yield cn

    @property
    def parent_node(self):
        return self.parent

    @parent_node.setter
    def parent_node(self, pnode):
        self.parent = pnode

    @property
    def is_leaf_node(self):
        return len(self.children) == 0

    @property
    def is_filename(self):
        return self.name.lower() == "srcfile"

    @property
    def is_class(self):
        return self.name.lower() == "class_statement_node"

    @property
    def is_method(self):
        return self.name.lower() == "method_statement_node"

    @property
    def is_generic_method(self):
        return self.name.lower() == "method_statement_node" \
            and not self.instruction == "ANNOTATION_METHOD_DECLARATION"

    @property
    def is_statement(self):
        return self.name.lower() == "statement_node"

    @property
    def is_identifier(self):
        return self.instruction_class == "IDENTIFIER"

    @property
    def instruction_class(self):
        # Lazily resolved from the 'instruction_class' attribute, falling
        # back to the legacy 'node_type' attribute name.
        if not self._instruction_class:
            attr_name = self.get_attribute("instruction_class")
            self._instruction_class = attr_name if attr_name else self.attributes.get("node_type")
        return self._instruction_class

    @instruction_class.setter
    def instruction_class(self, instruction_class_value):
        # BUG FIX: the original assigned to `self.instruction_class`, which
        # re-entered this setter and recursed until RecursionError; write to
        # the backing attribute instead (mirrors the `instruction` setter).
        self._instruction_class = instruction_class_value

    @property
    def instruction(self):
        # Lazily resolved from the 'instruction' attribute, falling back to
        # the legacy 'node_type_ls' attribute name.
        if not self._instruction:
            attr_name = self.get_attribute("instruction")
            self._instruction = attr_name if attr_name else self.get_attribute("node_type_ls")
        return self._instruction

    @instruction.setter
    def instruction(self, instruction_value):
        self._instruction = instruction_value

    @property
    def label(self):
        # All-uppercase names are lowered for display purposes.
        name = self.attributes.get('name')
        if name.isupper():
            name = name.lower()
        return name

    @property
    def code_label(self):
        """
        The `code_label` property corresponds to a Natural Language representation of
        current node.
        This NL node representation is determined as it follows:
        * if the node is a leaf node, its code label is its own label attribute.
        * if the node is an internal node, its code label corresponds to the one associated
        to the first leaf node rightmost among its children whose `instruction` attribute is equal
        to `IDENTIFIER`. In case there isn't any node matching the above criterion,
        the `code_label` attribute will correspond to the ones associated to every
        child node (separated by a dash).
        """
        if not self._repr_name:
            if self.is_leaf_node:
                self._repr_name = self.label
            else:
                for ch_node in reversed(self.children):
                    if ch_node.is_leaf_node and ch_node.instruction == 'IDENTIFIER':
                        self._repr_name = ch_node.label  # Get the label of leaf node
                        break
                if not self._repr_name:
                    # This is the case when current node has not leaf nodes
                    # among its children. In this case, code_label will be equal to the
                    # concatenation of all code_label properties of children
                    self._repr_name = '-'.join([n.code_label for n in self.children_nodes])
        return self._repr_name

    @property
    def line_number(self):
        """
        Get line number attribute and covert it
        to an int value.
        Return 0 (zero) if line_number is None or
        it is not set.
        """
        line_no_attr = self.get_attribute('line')
        line_no = line_no_attr if line_no_attr else self.get_attribute('line_number')
        if not line_no:
            return 0
        return int(line_no)

    # ID property (get and set)
    @property
    def node_id(self):
        return self._id

    @node_id.setter
    def node_id(self, id_value):
        self._id = id_value

    @property
    def src_filename(self):
        # Inherited lazily from the parent when not explicitly set.
        if not self._ref_filename and self.parent:
            self._ref_filename = self.parent.src_filename
        return self._ref_filename

    @src_filename.setter
    def src_filename(self, filename):
        self._ref_filename = filename

    @property
    def src_filepath(self):
        # Inherited lazily from the parent when not explicitly set.
        if not self._ref_filepath and self.parent:
            self._ref_filepath = self.parent.src_filepath
        return self._ref_filepath

    @src_filepath.setter
    def src_filepath(self, filepath):
        self._ref_filepath = filepath

    @property
    def src_relative_filepath(self):
        """Returns the relative source file path, i.e., the `src_filepath` without SOURCE_ROOT"""
        relative_filepath = self.src_filepath.replace(SOURCE_ROOT, '')
        if relative_filepath.startswith(path_sep):
            return relative_filepath[1:]
        return relative_filepath

    @property
    def startline(self):
        if not self._start_line:
            # if attribute has not ever been set
            self._start_line = self.line_number
            if not self._start_line:
                # if the line number is equal to zero (fake node in tree)
                # get the first startline != 0 looking at children
                for node in self.iter_breadth_first(include_self=False):
                    # iter children breadth-first
                    if node.startline:
                        self._start_line = node.startline  # Catcha!
                        break
        return self._start_line

    @startline.setter
    def startline(self, value):
        self._start_line = value

    def _get_max_recursive_endline(self):
        """
        Get the 'deepest' end_line value
        by traversing depth-first the subtrees
        rooted in children nodes.
        """
        return max([child.endline for child in self.children_nodes])

    @property
    def endline(self):
        if not self._end_line:
            if self.is_leaf_node:
                self._end_line = self.line_number
            else:
                self._end_line = self._get_max_recursive_endline()
        return self._end_line

    @endline.setter
    def endline(self, value):
        self._end_line = value

    # ===== Traversal Algorithms ======
    def iter_depth_first(self, include_self=True):
        # NOTE(review): pop(0) + extend-at-end is FIFO, so despite its name
        # this visits nodes level by level (with each node's children
        # reversed); the order is preserved as-is since callers may rely on it.
        # BUG FIX: with include_self=False the original assigned the lazy
        # `reversed(...)` iterator to `stack`, which has no `pop` method and
        # crashed immediately; it is materialised into a list here.
        stack = [self, ] if include_self else list(reversed(self.children))
        while stack:
            current_node = stack.pop(0)
            yield current_node
            stack.extend(reversed(current_node.children))

    __iter__ = iter_depth_first

    def iter_breadth_first(self, include_self=True):
        # Breadth first traversal
        stack = [self, ] if include_self else [child for child in self.children]
        while stack:
            current_node = stack.pop(0)
            yield current_node
            stack.extend(current_node.children)
    #__iter__ = iter_breadth_first
class Xml2ObjectParser(object):
    """
    XML to Object Converter.

    Loads an XML dump of an AST and maps it onto a tree of Python
    objects (`ParsedTreeNode` instances), returning the list of subtrees
    whose granularity depends on the selected parsing level.

    This solution is a modification of the original idea presented in
    the Cookbook recipe "12.5 Converting an XML Document into a Tree of
    Python Objects"; the modification makes the parser aware that input
    XML files correspond to ASTs.

    The different parsing granularities are implemented through
    subclassing: this class is an ABC with a set of *Template Methods*
    [pattern][0] (`is_root`, `is_upper_level`) invoked by the SAX
    callbacks. For concrete granularities see `ClassLevelParser`,
    `MethodLevelParser`, and `StatementLevelParser`.

    [0]: http://en.wikipedia.org/wiki/Template_method_pattern
    """

    def __init__(self, encode_node=True):
        """
        Parsing state handled by this class:
        - upper_level_info: dictionary mapping each upper-level node key
          to its parsing structures (the list of root trees and the
          level stack).
        - upper_level_stack: parsing stack of the current upper level.
        - trees: trees and subtrees of the current parsing level.
        - level_keys: keys of every upper-level node in
          `upper_level_info` (used to restore the parsing context,
          i.e., which level to switch back to).
        - node_stack: parsing stack of the current (sub)tree.

        Parameters:
        ===========
        - encode_node: if True (default), upper-level nodes are keyed in
          `upper_level_info` by an integer encoding; if False, the
          hashable node instance itself is used as key.
        """
        self.upper_level_info = dict()
        self.upper_level_stack = None
        self.node_encoding = encode_node
        self._level_idx = -1
        self.level_keys = list()
        # Per-subtree parsing state
        self.trees = None
        self.node_stack = None
        self.current_tree_index = -1
        self._id_counter = 0
        # Source file info propagated to every root node
        self.ref_src_filename = ''
        self.ref_src_relative_filepath = ''
        self.ref_src_filepath = ''

    # ===== Template Methods =====
    def is_root(self, node):
        """
        [Template method]
        The implementation should return whether or not the input node
        is a **root node** for the selected granularity.
        """
        raise NotImplementedError

    def is_upper_level(self, node):
        """
        [Template method]
        The implementation should return `True` (`False`) depending on
        whether the input node is (is not) an **upper_level** node.
        """
        raise NotImplementedError

    def encode_node(self, node, encode=True):
        """
        Return a unique key for an input *upper_level_node*, used to
        correctly map the root nodes of subtrees extracted from the
        input XML file.

        If `encode` is True (default), `hash(node)` is returned: no node
        instance is kept in the map structure, which is by far more
        space-efficient. If `encode` is False, the node instance itself
        is used as key (this works because `ParsedTreeNode` instances
        are hashable); note that the parsing algorithm does not add any
        child node to it.

        The value of `encode` corresponds to the `node_encoding`
        attribute set in the constructor.
        """
        if encode:
            return hash(node)  # integer key - hash of the ParsedTreeNode instance
        return node  # hashable node instance: no encoding really applied

    # ===== SAX Parser Hooks =====
    def start_element(self, name, attributes):
        """
        Callback invoked by the SAX parser whenever an *open* XML tag
        has been found.

        The parser must be aware of multiply-nested (sub)trees (classes
        nested in classes, methods nested in methods, and so forth), so
        the algorithm distinguishes two kinds of special nodes according
        to the selected granularity (see Template Methods):

        - **upper level nodes**: the nodes containing all the "main"
          subtrees (e.g., file nodes at class granularity, class nodes
          at method granularity);
        - **root nodes**: the roots of the subtrees corresponding to the
          selected granularity (e.g., class nodes at class granularity).

        Note that this parsing strategy will fail to collect
        methods/functions in languages such as Python that do not
        require a method/function to be defined inside a class.
        """
        node = ParsedTreeNode(name, attributes)
        if node.is_filename:
            # Remember the source file info for the root nodes to come.
            self.ref_src_filename = attributes.get('name')
            self.ref_src_filepath = attributes.get('file_path')
        if not self.upper_level_info and not self.is_upper_level(node):
            # No upper-level node has been found yet: discard this node.
            return
        if self.is_upper_level(node):
            # New upper level: open a fresh parsing context.
            node.src_filename = self.ref_src_filename
            node.src_filepath = self.ref_src_filepath
            self._level_idx += 1
            self.current_tree_index = -1
            node_key = self.encode_node(node, encode=self.node_encoding)
            self.upper_level_info[node_key] = {'trees': [], 'level_stack': [node]}
            self.level_keys.append(node_key)
            # Bind the current trees list and level stack to the new context.
            self.trees = self.upper_level_info[node_key]['trees']
            self.upper_level_stack = self.upper_level_info[node_key]['level_stack']
        if not self.is_root(node) and not len(self.trees):
            # No root node (i.e., subtree) found yet: discard this node.
            return
        if self.is_root(node):
            # Root of a new subtree to build up recursively.
            node.src_filename = self.ref_src_filename
            node.src_filepath = self.ref_src_filepath
            # Node ids restart from 0 for every subtree.
            self._id_counter = 0
            node.node_id = self._id_counter
            self._id_counter += 1
            self.upper_level_stack.append(node)
            self.trees.append({'root': node, 'stack': [node]})
            self.current_tree_index = len(self.trees) - 1
            self.node_stack = self.trees[self.current_tree_index]['stack']
        elif self.node_stack:  # a subtree is under construction
            # Internal node: attach it to the current parent.
            node.node_id = self._id_counter
            self._id_counter += 1
            parent = self.node_stack[-1]
            parent.add_child(node)
            node.parent_node = parent
            self.node_stack.append(node)
            self.upper_level_stack.append(node)

    def end_element(self, name):
        """
        Callback invoked by the SAX parser whenever a _closed_ XML tag
        has been found.

        Pops the closed node from the parsing stacks and, when an upper
        level or a subtree root is closed, restores the previous parsing
        context.
        """
        if not self.upper_level_stack or not len(self.upper_level_stack):
            # No nodes found so far: nothing to remove, keep moving.
            return
        if not self.node_stack or not len(self.node_stack):
            if len(self.upper_level_stack) == 1 and name == self.upper_level_stack[0].name:
                # Only upper-level nodes were found (no subtree): move
                # ahead to remove the upper-level node itself.
                pass
            else:
                # Nothing to remove in the current stack.
                return
        self.upper_level_stack.pop()  # pop from the global stack
        if not len(self.upper_level_stack):  # the upper-level stack is empty
            self.level_keys.pop()
            self._level_idx -= 1  # update level counter
            if self._level_idx < 0:
                return  # we're done!
            # Restore the previous parsing context.
            level_ref = self.upper_level_info[self.level_keys[self._level_idx]]
            self.trees = level_ref['trees']
            self.upper_level_stack = level_ref['level_stack']
            self.current_tree_index = len(self.trees) - 1
            if self.current_tree_index >= 0:
                self.node_stack = self.trees[self.current_tree_index]['stack']
            else:
                self.node_stack = None
            return
        # Delete the closed XML element (no index: last element by default).
        removed_node = self.node_stack.pop()
        # If the consumed node was a root node, switch the current
        # parsing stack to the previous subtree found so far.
        if self.is_root(removed_node):
            self.current_tree_index -= 1
            # NOTE(review): when current_tree_index reaches -1, Python's
            # negative indexing makes trees[-1] wrap around to the last
            # tree — preserved as-is from the original; verify intended.
            prev_node_stack = self.trees[self.current_tree_index]['stack']
            while not len(prev_node_stack) and self.current_tree_index >= 0:
                self.current_tree_index -= 1
                prev_node_stack = self.trees[self.current_tree_index]['stack']
            if self.current_tree_index < 0:
                self.node_stack = None
                self.trees[self.current_tree_index]['stack'] = []
            else:
                self.node_stack = self.trees[self.current_tree_index]['stack']

    def parse(self, filename):
        """
        Parse the XML content of the given file.

        The file handle is closed deterministically via the context
        manager (the previous implementation leaked it).
        """
        with open(filename) as xml_file:
            return self.parse_content(xml_file.read())

    def parse_content(self, xml_content):
        """
        Parse the input XML string of an AST, hooking self methods to
        the Expat SAX parser.

        Returns a flat list of all root nodes (each pointing to a parsed
        subtree) and a map grouping root nodes by upper-level node key.
        """
        # Create an Expat parser and bind the SAX event handlers.
        parser = expat.ParserCreate()
        parser.StartElementHandler = self.start_element
        parser.EndElementHandler = self.end_element
        # Parse the XML document (isfinal=1: complete document).
        parser.Parse(xml_content, 1)
        # `items()`/`values()` (instead of the Py2-only `iteritems()`/
        # `itervalues()`) behave identically on Py2 and keep the code
        # portable to Py3.
        roots_map = dict()
        for key, info in self.upper_level_info.items():
            roots_map[key] = [tree['root'] for tree in info['trees']]
        flat_roots_list = [root for tree_list in roots_map.values()
                           for root in tree_list]
        return flat_roots_list, roots_map
# Different Granularity Extractors
class ClassLevelParser(Xml2ObjectParser):
    """Class-level AST parser.

    Granularity details:
    - upper level node: file node
    - root node: class node
    """

    def is_upper_level(self, node):
        """A source-file node opens a new parsing context."""
        return node.is_filename

    def is_root(self, node):
        """A class node starts a new subtree."""
        return node.is_class
class MethodLevelParser(Xml2ObjectParser):
    """Method-level AST parser.

    Granularity details:
    - upper level node: class node
    - root node: method node
    """

    def is_upper_level(self, node):
        """A class node opens a new parsing context."""
        return node.is_class

    def is_root(self, node):
        """A method node starts a new subtree."""
        return node.is_method
class StatementLevelParser(Xml2ObjectParser):
    """Statement-level AST parser.

    Granularity details:
    - upper level node: method node
    - root node: statement node
    """

    def is_upper_level(self, node):
        """A method node opens a new parsing context."""
        return node.is_method

    def is_root(self, node):
        """A statement node starts a new subtree."""
        return node.is_statement
class XMLMethodTreeParser(object):
    """SAX-based parser that rebuilds the AST of a *single* method.

    Assumes the AST-XML input refers to one method/function; nested
    methods are not supported.
    """

    def __init__(self):
        """Set up the attributes used during tree parsing."""
        self._id_counter = 0          # next node id to assign
        self.node_stack = list()      # currently open (unclosed) nodes
        self.tree_root_node = None    # root of the parsed method tree

    def start_element(self, name, attributes):
        """
        Callback invoked by the SAX parser whenever an *open* XML tag
        has been found.

        Note that this parsing strategy will fail to collect
        methods/functions in languages (such as Python) that allow
        closures; the parser assumes AST-XML trees referring to single
        methods/functions.
        """
        node = ParsedTreeNode(name, attributes)
        if node.is_method:
            # Root of the (only) subtree: reset the stack around it.
            node.node_id = self._id_counter
            self._id_counter += 1
            self.node_stack = [node]
            self.tree_root_node = node  # NOTE: no inner methods/functions allowed!
            return
        if not self.node_stack:
            # No subtree under construction: ignore this node.
            return
        # Internal node: attach to the node currently on top of the stack.
        node.node_id = self._id_counter
        self._id_counter += 1
        parent = self.node_stack[-1]
        parent.add_child(node)
        node.parent_node = parent
        self.node_stack.append(node)

    def end_element(self, name):
        """
        Callback invoked by the SAX parser whenever a _closed_ XML tag
        has been found. Pops the closed element, if any is open.
        """
        if self.node_stack:
            self.node_stack.pop()

    def parse(self, xml_content):
        """
        Parse the input XML string of an AST, hooking self methods to
        the Expat SAX parser, and return the root of the parsed tree.
        """
        # Create an Expat parser and bind the SAX event handlers.
        parser = expat.ParserCreate()
        parser.StartElementHandler = self.start_element
        parser.EndElementHandler = self.end_element
        parser.Parse(xml_content, 1)
        return self.tree_root_node
| {
"content_hash": "01dc2eb1f9675ba5b89ece11c8a1c622",
"timestamp": "",
"source": "github",
"line_count": 880,
"max_line_length": 105,
"avg_line_length": 34.872727272727275,
"alnum_prop": 0.5868743482794577,
"repo_name": "leriomaggio/code-coherence-evaluation-tool",
"id": "24c54b5bda2a232706fc720424b09fc5a48d7b71",
"size": "30712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code_comments_coherence/source_code_analysis/code_analysis/xml_parsers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4554"
},
{
"name": "GAP",
"bytes": "54356"
},
{
"name": "HTML",
"bytes": "101167"
},
{
"name": "Java",
"bytes": "7241060"
},
{
"name": "JavaScript",
"bytes": "4644"
},
{
"name": "Python",
"bytes": "1934652"
}
],
"symlink_target": ""
} |
"""Base module for unittesting."""
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import login
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.app.testing import setRoles
from plone.app.testing import TEST_USER_ID
from plone.app.testing import TEST_USER_NAME
from plone.testing import z2
import unittest2 as unittest
class apmSitecontentLayer(PloneSandboxLayer):
    """Test layer for apm.sitecontent.

    Loads the package ZCML, installs the product, applies its default
    profile and creates a minimal piece of test content.
    """

    defaultBases = (PLONE_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        """Set up Zope."""
        # Load ZCML so the package's components are registered.
        import apm.sitecontent
        self.loadZCML(package=apm.sitecontent)
        z2.installProduct(app, 'apm.sitecontent')

    def setUpPloneSite(self, portal):
        """Set up Plone."""
        # Install into Plone site using portal_setup
        applyProfile(portal, 'apm.sitecontent:default')
        # Login and create some test content
        setRoles(portal, TEST_USER_ID, ['Manager'])
        login(portal, TEST_USER_NAME)
        portal.invokeFactory('Folder', 'folder')
        # Rebuild the catalog so the new content is indexed, then
        # commit so that the test browser sees these objects
        portal.portal_catalog.clearFindAndRebuild()
        import transaction
        transaction.commit()

    def tearDownZope(self, app):
        """Tear down Zope."""
        z2.uninstallProduct(app, 'apm.sitecontent')
# Shared layer instances: one fixture, wrapped for integration and
# functional test runs respectively.
FIXTURE = apmSitecontentLayer()
INTEGRATION_TESTING = IntegrationTesting(
    bases=(FIXTURE,), name="apmSitecontentLayer:Integration")
FUNCTIONAL_TESTING = FunctionalTesting(
    bases=(FIXTURE,), name="apmSitecontentLayer:Functional")
class IntegrationTestCase(unittest.TestCase):
    """Base class for integration tests."""

    # Runs on the shared integration layer defined above.
    layer = INTEGRATION_TESTING
class FunctionalTestCase(unittest.TestCase):
    """Base class for functional tests."""

    # Runs on the shared functional layer defined above.
    layer = FUNCTIONAL_TESTING
| {
"content_hash": "e2d3bbabd116a2caff385faf1deeb999",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 61,
"avg_line_length": 30.25,
"alnum_prop": 0.71849173553719,
"repo_name": "a25kk/apm",
"id": "a198f7ea43744d5a48dc05c8e7bfca83b679f502",
"size": "1960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apm.sitecontent/apm/sitecontent/testing.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124901"
},
{
"name": "HTML",
"bytes": "237464"
},
{
"name": "JavaScript",
"bytes": "78006"
},
{
"name": "Makefile",
"bytes": "3641"
},
{
"name": "Python",
"bytes": "46393"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
'''
Code for cifar10_v1 network, and visualizing filters.
'''
from __future__ import print_function
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, adadelta, rmsprop
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
import cPickle
import numpy as np
def cifar(weights='MODS_keras_weights_3_he_normal_0.5_rmsprop_24.h5'):
    """Build the MODS convnet (Keras 1.x Sequential API) and load weights.

    Parameters:
    - weights: path to the HDF5 weights file to load into the model.

    Returns the model ready for inference (binary output, sigmoid).
    """
    nb_classes = 2
    # Hyperparameters for tuning
    weight_init = 'he_normal'
    dropout = 0.5
    # Input image dimensions; images are greyscale (1 channel).
    img_rows, img_cols = 256, 192
    img_channels = 1

    model = Sequential()

    def _conv(n_filters, layer_name, **extra):
        # Convolution -> BatchNorm -> ReLU triple used throughout the net.
        model.add(Convolution2D(n_filters, 3, 3, init=weight_init,
                                name=layer_name, **extra))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Block 1
    _conv(128, 'conv1_1', input_shape=(img_channels, img_rows, img_cols))
    _conv(128, 'conv1_2')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Block 2
    _conv(256, 'conv2_1')
    _conv(256, 'conv2_2')
    _conv(256, 'conv2_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Block 3
    _conv(512, 'conv3_1')
    _conv(512, 'conv3_2')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Block 4 ('same' padding keeps the spatial size before pooling)
    _conv(1024, 'conv4_1', border_mode='same')
    _conv(1024, 'conv4_2', border_mode='same')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Classifier head (sigmoid output, not softmax)
    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    # NOTE(review): two consecutive Dropout layers reproduced verbatim
    # from the original network definition — possibly unintentional,
    # but kept to preserve behaviour; confirm before removing.
    model.add(Dropout(dropout))
    model.add(Dense(nb_classes))
    model.add(Activation('sigmoid'))

    model.load_weights(weights)
    return model
| {
"content_hash": "9425cf62736d5f06c63cf2fe0528e781",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 100,
"avg_line_length": 29.802197802197803,
"alnum_prop": 0.7352507374631269,
"repo_name": "santiagolopezg/MODS_ConvNet",
"id": "8fb4153dd1eaf98ac1639eff24eeb57a9998e989",
"size": "2712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old code/cifar10_visualize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "213612"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: add the ``resource`` FK to UrlImage.

    ``default=1`` presumably backfills existing rows with the Resource
    of pk 1, and ``keep_default=False`` drops the default afterwards —
    verify that a Resource with pk 1 exists before running.
    """
    # Adding field 'UrlImage.resource'
    db.add_column(u'catalog_urlimage', 'resource',
                  self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['catalog.Resource']),
                  keep_default=False)
def backwards(self, orm):
    """Reverse the migration: drop the ``resource_id`` column."""
    # Deleting field 'UrlImage.resource'
    db.delete_column(u'catalog_urlimage', 'resource_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'catalog.category': {
'Meta': {'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'catalog.city': {
'Meta': {'object_name': 'City'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'catalog.coordsystem': {
'EPSG_code': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'Meta': {'ordering': "['EPSG_code']", 'object_name': 'CoordSystem'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'catalog.county': {
'Meta': {'object_name': 'County'},
'cities': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'counties'", 'symmetrical': 'False', 'to': u"orm['catalog.City']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'catalog.datatype': {
'Meta': {'object_name': 'DataType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'catalog.department': {
'Meta': {'object_name': 'Department'},
'divisions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['catalog.Division']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'state'", 'max_length': '40'})
},
u'catalog.division': {
'Meta': {'object_name': 'Division'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'catalog.resource': {
'Meta': {'object_name': 'Resource'},
'agency_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'area_of_interest': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'resources'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Category']"}),
'cities': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['catalog.City']", 'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'coord_sys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['catalog.CoordSystem']", 'null': 'True', 'blank': 'True'}),
'counties': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['catalog.County']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by'", 'to': u"orm['auth.User']"}),
'csw_anytext': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'csw_mdsource': ('django.db.models.fields.CharField', [], {'default': "'local'", 'max_length': '100'}),
'csw_schema': ('django.db.models.fields.CharField', [], {'default': "'http://www.opengis.net/cat/csw/2.0.2'", 'max_length': '200'}),
'csw_typename': ('django.db.models.fields.CharField', [], {'default': "'csw:Record'", 'max_length': '200'}),
'csw_xml': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'data_formats': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'data_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['catalog.DataType']", 'null': 'True', 'blank': 'True'}),
'department': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Department']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'division': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Division']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '255', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'updated_by'", 'to': u"orm['auth.User']"}),
'metadata_contact': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'metadata_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'proj_coord_sys': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'release_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'time_period': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'update_frequency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updates': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.UpdateFrequency']", 'null': 'True', 'blank': 'True'}),
'usage': ('django.db.models.fields.TextField', [], {}),
'wkt_geometry': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'catalog.updatefrequency': {
'Meta': {'ordering': "['update_frequency']", 'object_name': 'UpdateFrequency'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'update_frequency': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'catalog.url': {
'Meta': {'object_name': 'Url'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Resource']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url_label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.UrlType']"})
},
u'catalog.urlimage': {
'Meta': {'object_name': 'UrlImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Resource']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Url']"})
},
u'catalog.urltype': {
'Meta': {'ordering': "['url_type']", 'object_name': 'UrlType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url_type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['catalog'] | {
"content_hash": "45c2ba8ded7da45cb361159dc1f18165",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 206,
"avg_line_length": 75.54970760233918,
"alnum_prop": 0.547565601052713,
"repo_name": "openrural/open-data-nc",
"id": "cc1b2403e362e3961a5b8bdfeec31fe361ff3823",
"size": "12943",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "opendata/catalog/migrations/0006_auto__add_field_urlimage_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "508367"
},
{
"name": "Elixir",
"bytes": "536"
},
{
"name": "JavaScript",
"bytes": "333168"
},
{
"name": "Python",
"bytes": "346034"
},
{
"name": "Scheme",
"bytes": "12750"
},
{
"name": "Shell",
"bytes": "96367"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
from AdaptivePELE.atomset import atomset, SymmetryContactMapEvaluator
from AdaptivePELE.utilities import utilities
import matplotlib.pyplot as plt
def parseArguments():
    """Parse the command-line options for the contact-map histogram script.

    Returns a 6-tuple: (trajectory paths, clustering path, number of top
    residues, ligand resname, contact threshold, topology file).
    """
    desc = ("Calculate the histogram of ContactMap contacts over a trajectory,"
            " either as pdb files or from a clustering\n")
    cli = argparse.ArgumentParser(description=desc)
    cli.add_argument("resname", type=str, help="Ligand resname in pdb")
    cli.add_argument("contactThreshold", type=int, help="Contact threshold to calculate contactMap")
    cli.add_argument("-trajectory", type=str, nargs='+', help="Path to the trajectory or pdbs to analyse")
    cli.add_argument("-clustering", help="Path to the clustering object to analyse")
    cli.add_argument("-nRes", type=int, default=10, help="Number of top residues to display")
    cli.add_argument("--top", type=str, default=None, help="Topology file needed for non-pdb trajectories")
    parsed = cli.parse_args()
    return (parsed.trajectory, parsed.clustering, parsed.nRes,
            parsed.resname, parsed.contactThreshold, parsed.top)
def generateConformations(resname, clAcc, trajectory, topology):
    """Yield PDB objects for every conformation to analyse.

    If a clustering accumulator is given, yield the stored cluster
    representatives; otherwise read every snapshot from the trajectory
    files and build a ligand PDB object for each.
    """
    # the topology file (when given) is read up front in both modes,
    # matching the original evaluation order
    topology_contents = (None if topology is None
                         else utilities.getTopologyFile(topology))
    if clAcc is not None:
        for cluster in clAcc.clusters.clusters:
            yield cluster.pdb
        return
    for traj in trajectory:
        for snapshot in utilities.getSnapshots(traj, topology=topology):
            pdb_obj = atomset.PDB()
            pdb_obj.initialise(snapshot, resname=resname, topology=topology_contents)
            yield pdb_obj
if __name__ == "__main__":
    traj_name, clustering, nRes, lig_resname, contactThreshold, top = parseArguments()
    # BUG FIX: the original assigned the loaded clustering object to a
    # misspelled name ("clusetrAcc") while the None branch set "clusterAcc",
    # so running without -clustering crashed with NameError at the loop below.
    # Both branches now consistently use clusterAcc.
    if clustering is None:
        clusterAcc = None
    else:
        clusterAcc = utilities.readClusteringObject(clustering)
    totalAcc = []
    symEval = SymmetryContactMapEvaluator.SymmetryContactMapEvaluator()
    refPDB = None
    for pdb in generateConformations(lig_resname, clusterAcc, traj_name, top):
        if refPDB is None:
            # keep the first conformation so atom indices can be mapped to
            # residue numbers after the loop
            refPDB = pdb
        contactMap, _ = symEval.createContactMap(pdb, lig_resname, contactThreshold)
        # per protein atom, count conformations with at least one contact
        # (the bool sum collapses multiple ligand contacts into one)
        if len(totalAcc):
            totalAcc += contactMap.sum(axis=0, dtype=bool).astype(int)
        else:
            totalAcc = contactMap.sum(axis=0, dtype=bool).astype(int)
    proteinList = symEval.proteinList
    residueCounts = {}
    totCounts = 0
    for atomID, counts in zip(proteinList, totalAcc):
        res = refPDB.atoms[atomID].resnum
        totCounts += counts
        if res in residueCounts:
            residueCounts[res] += counts
        else:
            residueCounts[res] = counts
    # normalize to frequencies; guard the degenerate no-contact case, which
    # previously raised ZeroDivisionError
    if totCounts:
        for res in residueCounts:
            residueCounts[res] /= float(totCounts)
    print("Residue\tResidue frequency")
    for res in sorted(residueCounts, key=lambda x: residueCounts[x], reverse=True)[:nRes]:
        print("%s\t%.4f" % (res, residueCounts[res]))
    plt.figure()
    plt.ylabel("Contact frequency")
    plt.xlabel("Residue number")
    plt.bar(list(residueCounts.keys()), residueCounts.values())
    plt.savefig("hist_CM.png")
    plt.show()
| {
"content_hash": "ee9f58f8fabdc8679aa006a0d9db0c6d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 121,
"avg_line_length": 40.97530864197531,
"alnum_prop": 0.6839409460680927,
"repo_name": "AdaptivePELE/AdaptivePELE",
"id": "16704d7a20e9c3c6f00ef1c9a7de39a3875fc7a5",
"size": "3319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AdaptivePELE/analysis/histCM.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "MATLAB",
"bytes": "39265"
},
{
"name": "Makefile",
"bytes": "5601"
},
{
"name": "Python",
"bytes": "1078513"
},
{
"name": "R",
"bytes": "13841"
},
{
"name": "Shell",
"bytes": "97160"
}
],
"symlink_target": ""
} |
"""
Created on Thu Mar 02 16:32:18 2017
@author: Colin Dryaton cdrayton@umich.edu
"""
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from gensim import corpora, models, similarities
import gensim
#import json
#import pandas as pd
#from glob import glob
#import re
import sys
import json
# Open the file (make sure its in the same directory as this file)
"""created the model with the below code"""
args = sys.argv
if len(args) > 1:
dims = int(args[1])
else:
dims = 300
LabeledSentence = gensim.models.doc2vec.LabeledSentence
doc2vec_dir ="Data/doc2vec/not_trump"
token_type = "zub_"
#sentences = []
#with open(doc2vec_dir+token_type+"doc2vec_train_corpus.txt",'r')as corpfile:
# sentences=[sent.split() for sent in corpfile.readlines()]
with open(doc2vec_dir+token_type+"id_text_dic.json",'r')as corpfile:
sent_dic = json.load(corpfile)
sentences = [LabeledSentence(v.split(),[str(k)]) for k,v in sent_dic.items()]
#sentences = models.doc2vec.TaggedLineDocument(doc2vec_dir+token_type+"doc2vec_train_corpus.txt")#yelp_data_small(words="sent_doc2vec", labels="label_doc2vec")
model_zub = models.Doc2Vec(sentences, size=dims, window=8, min_count=0, workers=4)
dims = str(dims)
model_zub.save(doc2vec_dir+token_type+"rumorEval_doc2vec"+dims+".model")
model_zub.init_sims(replace=True)
model_zub.save(doc2vec_dir+token_type+"rumorEval_doc2vec_set"+dims+".model")
dims =int(dims)
token_type = "twit_"
sentences = []
with open(doc2vec_dir+token_type+"id_text_dic.json",'r')as corpfile:
sent_dic = json.load(corpfile)
sentences = [LabeledSentence(v.split(),[str(k)]) for k,v in sent_dic.items()]
#sentences = models.doc2vec.TaggedLineDocument(doc2vec_dir+token_type+"doc2vec_train_corpus.txt")#yelp_data_small(words="sent_doc2vec", labels="label_doc2vec")
model_twit = models.Doc2Vec(sentences, size=dims, window=8, min_count=0, workers=4)
dims = str(dims)
model_twit.save(doc2vec_dir+token_type+"rumorEval_doc2vec"+dims+".model")
model_twit.init_sims(replace=True)
model_twit.save(doc2vec_dir+token_type+"rumorEval_doc2vec_set"+dims+".model")
print("\n")
print(model_zub.most_similar('sad'))
print(model_zub['sad'])
print(model_zub.docvecs.most_similar("552783667052167168"))
#print(model_twit.docvecs.most_similar('155014799909064704'))
print("\n")
#print model_twit.most_similar('black')
#for i in sentences[1130]:
# print i
#print len(sentences_twit)
#print sentences_twit[0]
#print len(sentences_twit) | {
"content_hash": "40e4f43610384f347a3b6cb330300ad6",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 159,
"avg_line_length": 33.03947368421053,
"alnum_prop": 0.7311827956989247,
"repo_name": "scramblingbalam/Alta_Real",
"id": "fee7ab166b63a7b97a78a7ef2b2e8aa277bed7a4",
"size": "2535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "create_doc2vec_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "178293"
}
],
"symlink_target": ""
} |
import asyncio
import collections
class EventResultOrError:
    """
    This class wraps the asyncio Event primitive, allowing either to awake
    all locked Tasks without any error or to raise an exception in them.
    thanks to @vorpalsmith for the simple design.
    """
    def __init__(self, loop):
        self._loop = loop  # loop used to schedule the waiter tasks
        self._exc = None   # exception to re-raise in every waiter, if any
        # BUG FIX: asyncio.Event(loop=...) was deprecated in Python 3.8 and
        # removed in 3.10, so the original raised TypeError on modern
        # interpreters.  The event binds to the loop it is first awaited on,
        # which is ``loop`` because wait() schedules via loop.create_task().
        self._event = asyncio.Event()
        self._waiters = collections.deque()

    def set(self, exc=None):
        """Wake all waiters; if *exc* is given, raise it in each of them."""
        self._exc = exc
        self._event.set()

    async def wait(self):
        """Block until set() is called, then return the event result.

        Re-raises the exception passed to set(), if any.
        """
        waiter = self._loop.create_task(self._event.wait())
        self._waiters.append(waiter)
        try:
            val = await waiter
        finally:
            self._waiters.remove(waiter)
        if self._exc is not None:
            raise self._exc
        return val

    def cancel(self):
        """ Cancel all waiters """
        for waiter in self._waiters:
            waiter.cancel()
| {
"content_hash": "f49004bc1280b38c3dca5a870e2f9c68",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 24.894736842105264,
"alnum_prop": 0.5835095137420718,
"repo_name": "rutsky/aiohttp",
"id": "e57ec2e5cd35b6c6d026e09aa42c83ab0372aede",
"size": "946",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "aiohttp/locks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "C",
"bytes": "187294"
},
{
"name": "Gherkin",
"bytes": "266"
},
{
"name": "Makefile",
"bytes": "3195"
},
{
"name": "Python",
"bytes": "1487288"
},
{
"name": "Shell",
"bytes": "2877"
}
],
"symlink_target": ""
} |
"""prelude -- extra builtins"""
from __future__ import absolute_import
import os, logging
__all__ = ('log', )
log = logging.getLogger(os.path.basename(os.path.dirname(__file__)))
log.addHandler(logging.StreamHandler())
| {
"content_hash": "abc8e54b8947b08f5e340c03e7bd5141",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 68,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6936936936936937,
"repo_name": "thisismedium/python-sasl",
"id": "c85ef899bdcca9477ba0e4eeef2c3e4c9e028f12",
"size": "348",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sasl/prelude.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "38439"
}
],
"symlink_target": ""
} |
# Platform detection + SCons tooling used by the bundle builder below.
import os, platform
# platform.uname() yields exactly six fields; only ``system`` is read later
# (to decide whether the real OSX bundle builder is installed).
( system, node, release, version, machine, processor ) = platform.uname()
from SCons.Script import *
def _DoAssembleBundle( target, source, env ):
    """SCons action: assemble an OSX .app bundle skeleton around the binary.

    Creates Contents/MacOS with a symlink back to the built binary,
    Contents/Resources, and symlinks Contents/Frameworks and
    Contents/Plugins at the Ogre release library directory.
    """
    topdir = os.getcwd()
    try:
        print('create the bundle structure for %s' % str( target[0] ) )
        bundle_dir = str( target[0] )
        source_bin = str( source[0] )
        # Contents/MacOS gets a relative symlink to the binary, which lives
        # three levels above (next to the bundle itself)
        macos_dir = os.path.join( bundle_dir, 'Contents/MacOS' )
        os.makedirs( macos_dir )
        os.chdir( macos_dir )
        os.symlink( os.path.join( '../../..', source_bin ), source_bin )
        os.chdir( topdir )
        os.makedirs( os.path.join( bundle_dir, 'Contents/Resources' ) )
        os.chdir( os.path.join( bundle_dir, 'Contents' ) )
        ogre_bin_dir = os.path.join( env['OGRE_SRC'], 'lib/Release' )
        os.symlink( ogre_bin_dir, 'Frameworks' )
        os.symlink( ogre_bin_dir, 'Plugins' )
    finally:
        # always restore the working directory, on success or failure
        os.chdir( topdir )
def AppendOSXBundleBuilder( env ):
    """Register the 'Bundle' builder on *env* and return the env.

    On Darwin the builder runs _DoAssembleBundle; elsewhere a no-op builder
    is installed so SConscripts can call Bundle() unconditionally.
    """
    bundle_action = _DoAssembleBundle if system == 'Darwin' else ''
    env.Append( BUILDERS = { 'Bundle' : Builder( action = bundle_action ) } )
    return env
| {
"content_hash": "811d1315268c41829cdb90aa3dd5b73c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 73,
"avg_line_length": 35,
"alnum_prop": 0.5814671814671815,
"repo_name": "modulexcite/es_core",
"id": "32e1b3dae29084dc3f2554acff9698336ef071d4",
"size": "1403",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "site_scons/site_init.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "4022"
},
{
"name": "C",
"bytes": "15302"
},
{
"name": "C++",
"bytes": "92502"
},
{
"name": "CMake",
"bytes": "7202"
},
{
"name": "FLUX",
"bytes": "9614"
},
{
"name": "GLSL",
"bytes": "178351"
},
{
"name": "Gnuplot",
"bytes": "1174"
},
{
"name": "HTML",
"bytes": "9848"
},
{
"name": "Haskell",
"bytes": "6201"
},
{
"name": "Makefile",
"bytes": "302"
},
{
"name": "Objective-C++",
"bytes": "2502"
},
{
"name": "Python",
"bytes": "6198"
}
],
"symlink_target": ""
} |
"""
Support for MQTT JSON lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.mqtt_json/
"""
import asyncio
import logging
import json
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.components.mqtt as mqtt
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH,
ATTR_RGB_COLOR, ATTR_TRANSITION, ATTR_WHITE_VALUE, ATTR_XY_COLOR,
FLASH_LONG, FLASH_SHORT, Light, PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH, SUPPORT_RGB_COLOR,
SUPPORT_TRANSITION, SUPPORT_WHITE_VALUE, SUPPORT_XY_COLOR)
from homeassistant.const import (
CONF_BRIGHTNESS, CONF_COLOR_TEMP, CONF_EFFECT,
CONF_NAME, CONF_OPTIMISTIC, CONF_RGB, CONF_WHITE_VALUE, CONF_XY)
from homeassistant.components.mqtt import (
CONF_STATE_TOPIC, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'mqtt_json'
DEPENDENCIES = ['mqtt']
# Every optional light capability defaults to disabled; it must be enabled
# explicitly in the platform configuration.
DEFAULT_BRIGHTNESS = False
DEFAULT_COLOR_TEMP = False
DEFAULT_EFFECT = False
# Flash durations sent in the JSON payload (presumably seconds — the device
# firmware defines the unit; confirm against the consuming device).
DEFAULT_FLASH_TIME_LONG = 10
DEFAULT_FLASH_TIME_SHORT = 2
DEFAULT_NAME = 'MQTT JSON Light'
DEFAULT_OPTIMISTIC = False
DEFAULT_RGB = False
DEFAULT_WHITE_VALUE = False
DEFAULT_XY = False
# Configuration keys specific to this platform.
CONF_EFFECT_LIST = 'effect_list'
CONF_FLASH_TIME_LONG = 'flash_time_long'
CONF_FLASH_TIME_SHORT = 'flash_time_short'
# Stealing some of these from the base MQTT configs.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_BRIGHTNESS, default=DEFAULT_BRIGHTNESS): cv.boolean,
    vol.Optional(CONF_COLOR_TEMP, default=DEFAULT_COLOR_TEMP): cv.boolean,
    vol.Optional(CONF_EFFECT, default=DEFAULT_EFFECT): cv.boolean,
    vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_FLASH_TIME_SHORT, default=DEFAULT_FLASH_TIME_SHORT):
        cv.positive_int,
    vol.Optional(CONF_FLASH_TIME_LONG, default=DEFAULT_FLASH_TIME_LONG):
        cv.positive_int,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
    vol.Optional(CONF_QOS, default=mqtt.DEFAULT_QOS):
        vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
    vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
    vol.Optional(CONF_RGB, default=DEFAULT_RGB): cv.boolean,
    vol.Optional(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_WHITE_VALUE, default=DEFAULT_WHITE_VALUE): cv.boolean,
    vol.Optional(CONF_XY, default=DEFAULT_XY): cv.boolean,
    vol.Required(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up a MQTT JSON Light."""
    # When created through MQTT discovery, validate the discovery payload
    # against the platform schema instead of the static YAML configuration.
    if discovery_info is not None:
        config = PLATFORM_SCHEMA(discovery_info)
    topics = {key: config.get(key)
              for key in (CONF_STATE_TOPIC, CONF_COMMAND_TOPIC)}
    flash_times = {key: config.get(key)
                   for key in (CONF_FLASH_TIME_SHORT, CONF_FLASH_TIME_LONG)}
    light = MqttJson(
        config.get(CONF_NAME),
        config.get(CONF_EFFECT_LIST),
        topics,
        config.get(CONF_QOS),
        config.get(CONF_RETAIN),
        config.get(CONF_OPTIMISTIC),
        config.get(CONF_BRIGHTNESS),
        config.get(CONF_COLOR_TEMP),
        config.get(CONF_EFFECT),
        config.get(CONF_RGB),
        config.get(CONF_WHITE_VALUE),
        config.get(CONF_XY),
        flash_times,
    )
    async_add_devices([light])
class MqttJson(Light):
    """Representation of a MQTT JSON light."""
    def __init__(self, name, effect_list, topic, qos, retain, optimistic,
                 brightness, color_temp, effect, rgb, white_value, xy,
                 flash_times):
        """Initialize MQTT JSON light.

        The boolean flags (brightness, color_temp, effect, rgb, white_value,
        xy) enable the corresponding capability; each enabled capability
        gets a non-None initial value below, and None means "unsupported".
        """
        self._name = name
        self._effect_list = effect_list
        self._topic = topic
        self._qos = qos
        self._retain = retain
        # without a state topic there is no feedback channel, so state must
        # always be assumed (optimistic mode)
        self._optimistic = optimistic or topic[CONF_STATE_TOPIC] is None
        self._state = False
        if brightness:
            self._brightness = 255
        else:
            self._brightness = None
        if color_temp:
            self._color_temp = 150
        else:
            self._color_temp = None
        if effect:
            self._effect = 'none'
        else:
            self._effect = None
        if rgb:
            self._rgb = [0, 0, 0]
        else:
            self._rgb = None
        if white_value:
            self._white_value = 255
        else:
            self._white_value = None
        if xy:
            self._xy = [1, 1]
        else:
            self._xy = None
        self._flash_times = flash_times
        # ``flag and CONSTANT`` ORs in the constant only when the flag is
        # True; a False flag ORs in False (== 0), leaving the mask unchanged.
        self._supported_features = (SUPPORT_TRANSITION | SUPPORT_FLASH)
        self._supported_features |= (rgb and SUPPORT_RGB_COLOR)
        self._supported_features |= (brightness and SUPPORT_BRIGHTNESS)
        self._supported_features |= (color_temp and SUPPORT_COLOR_TEMP)
        self._supported_features |= (effect and SUPPORT_EFFECT)
        self._supported_features |= (white_value and SUPPORT_WHITE_VALUE)
        self._supported_features |= (xy and SUPPORT_XY_COLOR)
    @asyncio.coroutine
    def async_added_to_hass(self):
        """Subscribe to MQTT events.
        This method is a coroutine.
        """
        @callback
        def state_received(topic, payload, qos):
            """Handle new MQTT messages."""
            # For every capability: KeyError is silently ignored because the
            # attribute may simply be absent from a partial state payload;
            # ValueError means the value was present but malformed.
            values = json.loads(payload)
            if values['state'] == 'ON':
                self._state = True
            elif values['state'] == 'OFF':
                self._state = False
            if self._rgb is not None:
                try:
                    red = int(values['color']['r'])
                    green = int(values['color']['g'])
                    blue = int(values['color']['b'])
                    self._rgb = [red, green, blue]
                except KeyError:
                    pass
                except ValueError:
                    _LOGGER.warning("Invalid RGB color value received")
            if self._brightness is not None:
                try:
                    self._brightness = int(values['brightness'])
                except KeyError:
                    pass
                except ValueError:
                    _LOGGER.warning("Invalid brightness value received")
            if self._color_temp is not None:
                try:
                    self._color_temp = int(values['color_temp'])
                except KeyError:
                    pass
                except ValueError:
                    _LOGGER.warning("Invalid color temp value received")
            if self._effect is not None:
                try:
                    self._effect = values['effect']
                except KeyError:
                    pass
                except ValueError:
                    _LOGGER.warning("Invalid effect value received")
            if self._white_value is not None:
                try:
                    self._white_value = int(values['white_value'])
                except KeyError:
                    pass
                except ValueError:
                    _LOGGER.warning("Invalid white value received")
            if self._xy is not None:
                try:
                    x_color = float(values['color']['x'])
                    y_color = float(values['color']['y'])
                    self._xy = [x_color, y_color]
                except KeyError:
                    pass
                except ValueError:
                    _LOGGER.warning("Invalid XY color value received")
            self.async_schedule_update_ha_state()
        # only subscribe when a state topic is configured; otherwise the
        # light runs purely optimistically (see __init__)
        if self._topic[CONF_STATE_TOPIC] is not None:
            yield from mqtt.async_subscribe(
                self.hass, self._topic[CONF_STATE_TOPIC], state_received,
                self._qos)
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness
    @property
    def color_temp(self):
        """Return the color temperature in mired."""
        return self._color_temp
    @property
    def effect(self):
        """Return the current effect."""
        return self._effect
    @property
    def effect_list(self):
        """Return the list of supported effects."""
        return self._effect_list
    @property
    def rgb_color(self):
        """Return the RGB color value."""
        return self._rgb
    @property
    def white_value(self):
        """Return the white property."""
        return self._white_value
    @property
    def xy_color(self):
        """Return the XY color value."""
        return self._xy
    @property
    def should_poll(self):
        """No polling needed for a MQTT light."""
        return False
    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._optimistic
    @property
    def supported_features(self):
        """Flag supported features."""
        return self._supported_features
    @asyncio.coroutine
    def async_turn_on(self, **kwargs):
        """Turn the device on.
        This method is a coroutine.
        """
        # should_update tracks whether local (optimistic) state changed and
        # Home Assistant needs to be told about it after publishing
        should_update = False
        message = {'state': 'ON'}
        if ATTR_RGB_COLOR in kwargs:
            message['color'] = {
                'r': kwargs[ATTR_RGB_COLOR][0],
                'g': kwargs[ATTR_RGB_COLOR][1],
                'b': kwargs[ATTR_RGB_COLOR][2]
            }
            if self._optimistic:
                self._rgb = kwargs[ATTR_RGB_COLOR]
                should_update = True
        if ATTR_FLASH in kwargs:
            flash = kwargs.get(ATTR_FLASH)
            if flash == FLASH_LONG:
                message['flash'] = self._flash_times[CONF_FLASH_TIME_LONG]
            elif flash == FLASH_SHORT:
                message['flash'] = self._flash_times[CONF_FLASH_TIME_SHORT]
        if ATTR_TRANSITION in kwargs:
            message['transition'] = int(kwargs[ATTR_TRANSITION])
        if ATTR_BRIGHTNESS in kwargs:
            message['brightness'] = int(kwargs[ATTR_BRIGHTNESS])
            if self._optimistic:
                self._brightness = kwargs[ATTR_BRIGHTNESS]
                should_update = True
        if ATTR_COLOR_TEMP in kwargs:
            message['color_temp'] = int(kwargs[ATTR_COLOR_TEMP])
            if self._optimistic:
                self._color_temp = kwargs[ATTR_COLOR_TEMP]
                should_update = True
        if ATTR_EFFECT in kwargs:
            message['effect'] = kwargs[ATTR_EFFECT]
            if self._optimistic:
                self._effect = kwargs[ATTR_EFFECT]
                should_update = True
        if ATTR_WHITE_VALUE in kwargs:
            message['white_value'] = int(kwargs[ATTR_WHITE_VALUE])
            if self._optimistic:
                self._white_value = kwargs[ATTR_WHITE_VALUE]
                should_update = True
        # NOTE: when both RGB and XY are passed, XY overwrites the 'color'
        # entry built above because it comes later
        if ATTR_XY_COLOR in kwargs:
            message['color'] = {
                'x': kwargs[ATTR_XY_COLOR][0],
                'y': kwargs[ATTR_XY_COLOR][1]
            }
            if self._optimistic:
                self._xy = kwargs[ATTR_XY_COLOR]
                should_update = True
        mqtt.async_publish(
            self.hass, self._topic[CONF_COMMAND_TOPIC], json.dumps(message),
            self._qos, self._retain)
        if self._optimistic:
            # Optimistically assume that the light has changed state.
            self._state = True
            should_update = True
        if should_update:
            self.async_schedule_update_ha_state()
    @asyncio.coroutine
    def async_turn_off(self, **kwargs):
        """Turn the device off.
        This method is a coroutine.
        """
        message = {'state': 'OFF'}
        if ATTR_TRANSITION in kwargs:
            message['transition'] = int(kwargs[ATTR_TRANSITION])
        mqtt.async_publish(
            self.hass, self._topic[CONF_COMMAND_TOPIC], json.dumps(message),
            self._qos, self._retain)
        if self._optimistic:
            # Optimistically assume that the light has changed state.
            self._state = False
            self.async_schedule_update_ha_state()
| {
"content_hash": "5e7b7a7aa30555a18e8563ade1147f45",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 79,
"avg_line_length": 31.87878787878788,
"alnum_prop": 0.5665399239543726,
"repo_name": "ewandor/home-assistant",
"id": "e3e3f7dafde6fb82480babe624c7b7857a2d421a",
"size": "12624",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/light/mqtt_json.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8860790"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12639"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the django-herokuify documentation.
import sys, os
import datetime
import jinja2.filters

# Configure empty Django settings so autodoc can import the project modules.
import django.conf
django.conf.settings.configure()

# Make the project package (one directory up) importable for autodoc.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration -----------------------------------------------------
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.extlinks']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

project = u'django-herokuify'
# The copyright string contains HTML, so mark it safe for the theme templates.
copyright = jinja2.filters.do_mark_safe('%s, <a href="http://en.ig.ma/">Filip Wasilewski</a>' % datetime.date.today().year)

# The short X.Y version and the full release string.
version = '1.0'
release = '1.0.pre3'

exclude_patterns = ['_build']
pygments_style = 'sphinx'
# Strip this prefix when sorting the module index.
modindex_common_prefix = ["herokuify."]

# -- Options for HTML output ---------------------------------------------------
html_theme = 'nature'
html_static_path = ['_static']
html_sidebars = {
    '**': ['localtoc.html', "relations.html", 'quicklinks.html', 'searchbox.html', 'editdocument.html'],
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-herokuifydoc'

# -- Options for LaTeX output --------------------------------------------------
# Paper size / point size / preamble overrides would go here; none needed.
latex_elements = {
}
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'django-herokuify.tex', u'django-herokuify Documentation',
     u'Filip Wasilewski', 'manual'),
]

# -- Options for manual page output --------------------------------------------
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-herokuify', u'django-herokuify Documentation',
     [u'Filip Wasilewski'], 1)
]

# -- Options for Texinfo output ------------------------------------------------
# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    ('index', 'django-herokuify', u'django-herokuify Documentation',
     u'Filip Wasilewski', 'django-herokuify', 'One line description of project.',
     'Miscellaneous'),
]

# Cross-reference the Python standard library documentation.
intersphinx_mapping = {'http://docs.python.org/': None}

autodoc_member_order = 'bysource'
| {
"content_hash": "7597fac8f43b89d9940010a54e32e12f",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 123,
"avg_line_length": 32.67755102040816,
"alnum_prop": 0.7060954284286785,
"repo_name": "nigma/django-herokuify",
"id": "bbfa4a9f9e914454ec875ab175f2d745ab700e3a",
"size": "8433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14968"
}
],
"symlink_target": ""
} |
import json
# Module-level cache mapping id(handler function) -> EventHandlerInfo;
# populated lazily by get_handler_info() below.
id2handlerInfo = {}
class JSONSerializable:
    """Mixin rendering instances as JSON via the module-level jsonify()."""
    def __str__(self):
        # compact JSON
        return jsonify(self)
    def __repr__(self):
        # indented JSON, easier to read when debugging
        return jsonify(self, indent=4)
class EventHandlerInfo(JSONSerializable):
    """Mutable record describing one registered event handler.

    Serialized through JSONSerializable; the leading-underscore field is
    excluded by jsonify(). Attribute creation order is preserved so the
    JSON field order matches the original implementation.
    """
    def __init__(self):
        self.selector = self.event = self.uiData = None
        self.handler = self._handler = None
        self.filter_selector = None
        self.stop_propagation = self.throttle = False
def get_handler_info(func) -> EventHandlerInfo:
    """Return the cached EventHandlerInfo for *func*, creating it on first use.

    Entries are keyed by id(func) in the module-level id2handlerInfo dict.
    """
    key = id(func)
    try:
        return id2handlerInfo[key]
    except KeyError:
        info = EventHandlerInfo()
        id2handlerInfo[key] = info
        return info
def jsonify(obj, **kwargs) -> str:
    """Serialize *obj* to compact JSON.

    Objects that json cannot encode natively fall back to their __dict__,
    with attributes whose names start with '_' filtered out.
    """
    def _public_attrs(o):
        return {name: value for name, value in o.__dict__.items()
                if not name.startswith('_')}
    return json.dumps(obj, default=_public_attrs,
                      separators=(',', ':'), **kwargs)
class UIData:
    """Read-only attribute view over a plain dict.

    Missing keys raise KeyError (not AttributeError), matching the
    underlying dict lookup.
    """
    def __init__(self, data):
        self._data = data
    def __getattr__(self, name):
        return self._data[name]
    def __repr__(self):
        return repr(self._data)
| {
"content_hash": "8a7d2f70554b223f6fa04715a69de0cd",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 103,
"avg_line_length": 24.040816326530614,
"alnum_prop": 0.5840407470288624,
"repo_name": "red8012/portkey",
"id": "22e891e14489ab2f02120d9ba1146be362d32ad6",
"size": "1178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "portkey/utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "5653"
},
{
"name": "Python",
"bytes": "13341"
}
],
"symlink_target": ""
} |
"""
:mod:`pyffi.spells.nif.modify` --- spells to make modifications
=================================================================
Module which contains all spells that modify a nif.
.. autoclass:: SpellTexturePath
:show-inheritance:
:members:
.. autoclass:: SpellSubstituteTexturePath
:show-inheritance:
:members:
.. autoclass:: SpellLowResTexturePath
:show-inheritance:
:members:
.. autoclass:: SpellCollisionType
:show-inheritance:
:members:
.. autoclass:: SpellCollisionMaterial
:show-inheritance:
:members:
.. autoclass:: SpellScaleAnimationTime
:show-inheritance:
:members:
.. autoclass:: SpellReverseAnimation
:show-inheritance:
:members:
.. autoclass:: SpellSubstituteStringPalette
:show-inheritance:
:members:
.. autoclass:: SpellChangeBonePriorities
:show-inheritance:
:members:
.. autoclass:: SpellSetInterpolatorTransRotScale
:show-inheritance:
:members:
.. autoclass:: SpellDelInterpolatorTransformData
:show-inheritance:
:members:
.. autoclass:: SpellDelBranches
:show-inheritance:
:members:
.. autoclass:: _SpellDelBranchClasses
:show-inheritance:
:members:
.. autoclass:: SpellDelSkinShapes
:show-inheritance:
:members:
.. autoclass:: SpellDisableParallax
:show-inheritance:
:members:
.. autoclass:: SpellAddStencilProperty
:show-inheritance:
:members:
.. autoclass:: SpellDelVertexColor
:show-inheritance:
:members:
.. autoclass:: SpellMakeSkinlessNif
:show-inheritance:
:members:
.. autoclass:: SpellCleanFarNif
:show-inheritance:
:members:
.. autoclass:: SpellMakeFarNif
:show-inheritance:
:members:
"""
# --------------------------------------------------------------------------
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, NIF File Format Library and Tools.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
# --------------------------------------------------------------------------
from pyffi.formats.nif import NifFormat
from pyffi.object_models.common import _as_bytes
from pyffi.spells.nif import NifSpell
import pyffi.spells.nif
import pyffi.spells.nif.check # recycle checking spells for update spells
import pyffi.spells.nif.fix
import codecs
import os
import re
class SpellTexturePath(
    pyffi.spells.nif.fix.SpellParseTexturePath):
    """Changes the texture path while keeping the texture names."""
    SPELLNAME = "modify_texturepath"
    READONLY = False

    @classmethod
    def toastentry(cls, toaster):
        """Read the replacement folder from the -a option; refuse to run
        without it.
        """
        arg = toaster.options["arg"]
        if not arg:
            toaster.logger.warn(
                "must specify path as argument "
                "(e.g. -a textures\\pm\\dungeons\\bloodyayleid\\interior) "
                "to apply spell")
            return False
        # normalize both separator styles to the platform separator so
        # path joining below works on windows and linux alike
        toaster.texture_path = str(arg).replace("/", os.sep).replace(
            "\\", os.sep)
        return True

    def substitute(self, old_path):
        """Return *old_path* re-rooted into the configured folder,
        keeping only its base name.
        """
        # nif texture paths use backslashes, so convert back after joining
        base = os.path.basename(old_path.replace("\\", os.sep))
        new_path = os.path.join(
            self.toaster.texture_path, base).replace(os.sep, "\\")
        if new_path != old_path:
            self.changed = True
            self.toaster.msg("%s -> %s" % (old_path, new_path))
        return new_path
class SpellSubstituteTexturePath(
    pyffi.spells.nif.fix.SpellFixTexturePath):
    """Runs a regex replacement on texture paths."""
    SPELLNAME = "modify_substitutetexturepath"

    @classmethod
    def toastentry(cls, toaster):
        """Parse the -a argument of the form <sep>pattern<sep>replacement
        (its first character is the separator).
        """
        arg = toaster.options["arg"]
        if not arg:
            # missing arg
            toaster.logger.warn(
                "must specify regular expression and substitution as argument "
                "(e.g. -a /architecture/city) to apply spell")
            return False
        dummy, pattern, replacement = arg.split(arg[0])
        toaster.sub = _as_bytes(replacement)
        toaster.regex = re.compile(_as_bytes(pattern))
        return True

    def substitute(self, old_path):
        """Returns modified texture path, and reports if path was modified.
        """
        if not old_path:
            # nothing to substitute in an empty path
            return old_path
        replaced = self.toaster.regex.sub(self.toaster.sub, old_path)
        if replaced != old_path:
            self.changed = True
            self.toaster.msg("%s -> %s" % (old_path, replaced))
        return replaced
class SpellLowResTexturePath(SpellSubstituteTexturePath):
    """Changes the texture path by replacing 'textures\\*' with
    'textures\\lowres\\*' - used mainly for making _far.nifs
    """
    SPELLNAME = "modify_texturepathlowres"

    @classmethod
    def toastentry(cls, toaster):
        # fixed pattern: insert "lowres" right after the "textures" root
        toaster.sub = _as_bytes("textures\\\\lowres\\\\")
        toaster.regex = re.compile(_as_bytes("^textures\\\\"), re.IGNORECASE)
        return True

    def substitute(self, old_path):
        """Apply the lowres substitution unless the path already has it."""
        if _as_bytes('\\lowres\\') in old_path.lower():
            # already points at a lowres texture: leave untouched
            return old_path
        return SpellSubstituteTexturePath.substitute(self, old_path)
class SpellCollisionType(NifSpell):
    """Sets the object collision to be a different type"""
    SPELLNAME = "modify_collisiontype"
    READONLY = False
    # Each nested class below is a preset of bhkRigidBody field values;
    # branchentry copies every attribute onto the rigid body.
    # NOTE(review): the numeric values presumably match the game's havok
    # layer/motion-system enums — confirm against nif.xml before editing.
    class CollisionTypeStatic:
        layer = 1
        motion_system = 7
        unknown_byte1 = 1
        unknown_byte2 = 1
        quality_type = 1
        wind = 0
        solid = True
        mass = 0
    class CollisionTypeAnimStatic(CollisionTypeStatic):
        layer = 2
        motion_system = 6
        unknown_byte1 = 2
        unknown_byte2 = 2
        quality_type = 2
    class CollisionTypeTerrain(CollisionTypeStatic):
        layer = 14
        motion_system = 7
    class CollisionTypeClutter(CollisionTypeAnimStatic):
        layer = 4
        motion_system = 4
        quality_type = 3
        mass = 10
    class CollisionTypeWeapon(CollisionTypeClutter):
        layer = 5
        mass = 25
    class CollisionTypeNonCollidable(CollisionTypeStatic):
        layer = 15
        motion_system = 7
    # maps the -a argument string to one of the presets above
    COLLISION_TYPE_DICT = {
        "static": CollisionTypeStatic,
        "anim_static": CollisionTypeAnimStatic,
        "clutter": CollisionTypeClutter,
        "weapon": CollisionTypeWeapon,
        "terrain": CollisionTypeTerrain,
        "non_collidable": CollisionTypeNonCollidable
    }
    @classmethod
    def toastentry(cls, toaster):
        # resolve the -a argument to a preset; refuse to run on bad input
        try:
            toaster.col_type = cls.COLLISION_TYPE_DICT[toaster.options["arg"]]
        except KeyError:
            # incorrect arg
            toaster.logger.warn(
                "must specify collision type to change to as argument "
                "(e.g. -a static (accepted names: %s) "
                "to apply spell"
                % ", ".join(iter(cls.COLLISION_TYPE_DICT.keys())))
            return False
        else:
            return True
    def datainspect(self):
        # only worth reading files that contain a rigid body at all
        return self.inspectblocktype(NifFormat.bhkRigidBody)
    def branchinspect(self, branch):
        # only inspect the NiAVObject branch
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.bhkCollisionObject,
                                   NifFormat.bhkRigidBody,
                                   NifFormat.bhkMoppBvTreeShape,
                                   NifFormat.bhkPackedNiTriStripsShape))
    def branchentry(self, branch):
        if isinstance(branch, NifFormat.bhkRigidBody):
            self.changed = True
            # copy every preset field onto the rigid body
            branch.layer = self.toaster.col_type.layer
            branch.layer_copy = self.toaster.col_type.layer
            branch.mass = self.toaster.col_type.mass
            branch.motion_system = self.toaster.col_type.motion_system
            branch.unknown_byte_1 = self.toaster.col_type.unknown_byte1
            branch.unknown_byte_2 = self.toaster.col_type.unknown_byte2
            branch.quality_type = self.toaster.col_type.quality_type
            branch.wind = self.toaster.col_type.wind
            branch.solid = self.toaster.col_type.solid
            self.toaster.msg("collision set to %s"
                             % self.toaster.options["arg"])
            # bhkPackedNiTriStripsShape could be further down, so keep looking
            return True
        elif isinstance(branch, NifFormat.bhkPackedNiTriStripsShape):
            self.changed = True
            # packed shapes carry the layer per sub-shape as well
            for subshape in branch.get_sub_shapes():
                subshape.layer = self.toaster.col_type.layer
            self.toaster.msg("collision set to %s"
                             % self.toaster.options["arg"])
            # all extra blocks here done; no need to recurse further
            return False
        else:
            # recurse further
            return True
class SpellScaleAnimationTime(NifSpell):
    """Scales the animation time."""
    SPELLNAME = "modify_scaleanimationtime"
    READONLY = False
    @classmethod
    def toastentry(cls, toaster):
        # the scale factor comes from the -a option and is mandatory
        if not toaster.options["arg"]:
            toaster.logger.warn(
                "must specify scaling number as argument "
                "(e.g. -a 0.6) to apply spell")
            return False
        else:
            toaster.animation_scale = float(toaster.options["arg"])
            return True
    def datainspect(self):
        # returns more than needed but easiest way to ensure it catches all
        # types of animations
        return True
    def branchinspect(self, branch):
        # inspect the NiAVObject branch, and NiControllerSequence
        # branch (for kf files)
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiTimeController,
                                   NifFormat.NiInterpolator,
                                   NifFormat.NiControllerManager,
                                   NifFormat.NiControllerSequence,
                                   NifFormat.NiKeyframeData,
                                   NifFormat.NiTextKeyExtraData,
                                   NifFormat.NiFloatData))
    def branchentry(self, branch):
        def scale_key_times(keys):
            """Helper function to scale key times."""
            for key in keys:
                key.time *= self.toaster.animation_scale
        if isinstance(branch, NifFormat.NiKeyframeData):
            self.changed = True
            if branch.rotation_type == 4:
                # rotation type 4 stores X/Y/Z rotations as separate curves
                scale_key_times(branch.xyz_rotations[0].keys)
                scale_key_times(branch.xyz_rotations[1].keys)
                scale_key_times(branch.xyz_rotations[2].keys)
            else:
                scale_key_times(branch.quaternion_keys)
            scale_key_times(branch.translations.keys)
            scale_key_times(branch.scales.keys)
            # no children of NiKeyframeData so no need to recurse further
            return False
        elif isinstance(branch, NifFormat.NiControllerSequence):
            self.changed = True
            # stretch the sequence end marker along with the keys
            branch.stop_time *= self.toaster.animation_scale
            # recurse further into children of NiControllerSequence
            return True
        elif isinstance(branch, NifFormat.NiTextKeyExtraData):
            self.changed = True
            scale_key_times(branch.text_keys)
            # no children of NiTextKeyExtraData so no need to recurse further
            return False
        elif isinstance(branch, NifFormat.NiTimeController):
            self.changed = True
            branch.stop_time *= self.toaster.animation_scale
            # recurse further into children of NiTimeController
            return True
        elif isinstance(branch, NifFormat.NiFloatData):
            self.changed = True
            scale_key_times(branch.data.keys)
            # no children of NiFloatData so no need to recurse further
            return False
        else:
            # recurse further
            return True
class SpellReverseAnimation(NifSpell):
    """Reverses the animation by reversing datas in relation to the time."""
    SPELLNAME = "modify_reverseanimation"
    READONLY = False
    def datainspect(self):
        # returns more than needed but easiest way to ensure it catches all
        # types of animations
        return True
    def branchinspect(self, branch):
        # inspect the NiAVObject branch, and NiControllerSequence
        # branch (for kf files)
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiTimeController,
                                   NifFormat.NiInterpolator,
                                   NifFormat.NiControllerManager,
                                   NifFormat.NiControllerSequence,
                                   NifFormat.NiKeyframeData,
                                   NifFormat.NiTextKeyExtraData,
                                   NifFormat.NiFloatData))
    def branchentry(self, branch):
        def reverse_keys(keys):
            """Helper function to reverse keys."""
            # key *times* keep their order; only the values are written
            # back in reverse, which plays the curve backwards on the
            # original timeline
            # copy the values
            key_values = [key.value for key in keys]
            # reverse them
            for key, new_value in zip(keys, reversed(key_values)):
                key.value = new_value
        if isinstance(branch, NifFormat.NiKeyframeData):
            self.changed = True
            # (this also covers NiTransformData)
            if branch.rotation_type == 4:
                # rotation type 4 stores X/Y/Z rotations as separate curves
                reverse_keys(branch.xyz_rotations[0].keys)
                reverse_keys(branch.xyz_rotations[1].keys)
                reverse_keys(branch.xyz_rotations[2].keys)
            else:
                reverse_keys(branch.quaternion_keys)
            reverse_keys(branch.translations.keys)
            reverse_keys(branch.scales.keys)
            # no children of NiTransformData so no need to recurse further
            return False
        elif isinstance(branch, NifFormat.NiTextKeyExtraData):
            self.changed = True
            reverse_keys(branch.text_keys)
            # no children of NiTextKeyExtraData so no need to recurse further
            return False
        elif isinstance(branch, NifFormat.NiFloatData):
            self.changed = True
            reverse_keys(branch.data.keys)
            # no children of NiFloatData so no need to recurse further
            return False
        else:
            # recurse further
            return True
class SpellCollisionMaterial(NifSpell):
    """Sets the object's collision material to be a different type"""
    SPELLNAME = "modify_collisionmaterial"
    READONLY = False
    # presets: havok material numbers copied onto bhkShape.material
    class CollisionMaterialStone:
        material = 0
    class CollisionMaterialCloth:
        material = 1
    class CollisionMaterialMetal:
        material = 5
    # maps the -a argument string to one of the presets above
    COLLISION_MATERIAL_DICT = {
        "stone": CollisionMaterialStone,
        "cloth": CollisionMaterialCloth,
        "metal": CollisionMaterialMetal
    }
    @classmethod
    def toastentry(cls, toaster):
        # resolve the -a argument to a preset; refuse to run on bad input
        try:
            toaster.col_material = cls.COLLISION_MATERIAL_DICT[toaster.options["arg"]]
        except KeyError:
            # incorrect arg
            toaster.logger.warn(
                "must specify collision material to change to as argument "
                "(e.g. -a stone (accepted names: %s) "
                "to apply spell"
                % ", ".join(iter(cls.COLLISION_MATERIAL_DICT.keys())))
            return False
        else:
            return True
    def datainspect(self):
        # only worth reading files that contain collision shapes
        return self.inspectblocktype(NifFormat.bhkShape)
    def branchinspect(self, branch):
        # only inspect the NiAVObject branch
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.bhkCollisionObject,
                                   NifFormat.bhkRigidBody,
                                   NifFormat.bhkShape))
    def branchentry(self, branch):
        if isinstance(branch, NifFormat.bhkShape):
            self.changed = True
            branch.material = self.toaster.col_material.material
            self.toaster.msg("collision material set to %s" % self.toaster.options["arg"])
            # bhkPackedNiTriStripsShape could be further down, so keep looking
            return True
        elif isinstance(branch, NifFormat.bhkPackedNiTriStripsShape):
            self.changed = True
            for subshape in branch.get_sub_shapes():
                # bug fix: this spell stores its preset in col_material;
                # col_type belongs to modify_collisiontype and raised
                # AttributeError here
                subshape.material = self.toaster.col_material.material
            self.toaster.msg("collision material set to %s" % self.toaster.options["arg"])
            # all extra blocks here done; no need to recurse further
            return False
        else:
            # recurse further
            return True
class SpellDelBranches(NifSpell):
    """Delete blocks that match the exclude list."""
    SPELLNAME = "modify_delbranches"
    READONLY = False

    def is_branch_to_be_deleted(self, branch):
        """Returns ``True`` for those branches that must be deleted.
        The default implementation returns ``True`` for branches that
        are not admissible as specified by include/exclude options of
        the toaster. Override in subclasses that must delete specific
        branches.
        """
        # anything outside the admissible class set is doomed
        return not self.toaster.is_admissible_branch_class(branch.__class__)

    def _branchinspect(self, branch):
        """This spell inspects every branch, also the non-admissible ones,
        therefore we must override this method.
        """
        return True

    def branchentry(self, branch):
        """Strip branch if it is flagged for deletion.
        """
        if not self.is_branch_to_be_deleted(branch):
            # keep this branch and continue into its children
            return True
        # flagged: unlink the branch from the tree entirely
        self.toaster.msg("stripping this branch")
        self.data.replace_global_node(branch, None)
        self.changed = True
        # the branch is gone, nothing left to recurse into
        return False
class _SpellDelBranchClasses(SpellDelBranches):
    """Delete blocks that match a given list. Only useful as base class
    for other spells.
    """
    BRANCH_CLASSES_TO_BE_DELETED = ()
    """List of branch classes that have to be deleted."""

    def datainspect(self):
        # worth reading the file only if at least one doomed block type
        # shows up in its header
        for doomed_class in self.BRANCH_CLASSES_TO_BE_DELETED:
            if self.inspectblocktype(doomed_class):
                return True
        return False

    def is_branch_to_be_deleted(self, branch):
        # delete exactly the instances of the listed classes
        return isinstance(branch, self.BRANCH_CLASSES_TO_BE_DELETED)
class SpellDelVertexColor(SpellDelBranches):
    """Delete vertex color properties and vertex color data."""
    SPELLNAME = "modify_delvertexcolor"
    def is_branch_to_be_deleted(self, branch):
        # only the NiVertexColorProperty blocks themselves get stripped;
        # the geometry data flag is reset separately in branchentry
        return isinstance(branch, NifFormat.NiVertexColorProperty)
    def datainspect(self):
        return self.inspectblocktype(NifFormat.NiTriBasedGeom)
    def branchinspect(self, branch):
        # only inspect the NiAVObject branch
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiTriBasedGeomData,
                                   NifFormat.NiVertexColorProperty))
    def branchentry(self, branch):
        # delete vertex color property
        SpellDelBranches.branchentry(self, branch)
        # reset vertex color flags
        if isinstance(branch, NifFormat.NiTriBasedGeomData):
            if branch.has_vertex_colors:
                self.toaster.msg("removing vertex colors")
                branch.has_vertex_colors = False
                self.changed = True
            # no children; no need to recurse further
            return False
        # recurse further
        return True
# identical to niftoaster.py modify_delbranches -x NiVertexColorProperty
# delete?
class SpellDelVertexColorProperty(_SpellDelBranchClasses):
    """Delete vertex color property if it is present."""
    SPELLNAME = "modify_delvertexcolorprop"
    # block types stripped by the _SpellDelBranchClasses machinery
    BRANCH_CLASSES_TO_BE_DELETED = (NifFormat.NiVertexColorProperty,)
# identical to niftoaster.py modify_delbranches -x NiAlphaProperty
# delete?
class SpellDelAlphaProperty(_SpellDelBranchClasses):
    """Delete alpha property if it is present."""
    SPELLNAME = "modify_delalphaprop"
    # block types stripped by the _SpellDelBranchClasses machinery
    BRANCH_CLASSES_TO_BE_DELETED = (NifFormat.NiAlphaProperty,)
# identical to niftoaster.py modify_delbranches -x NiSpecularProperty
# delete?
class SpellDelSpecularProperty(_SpellDelBranchClasses):
    """Delete specular property if it is present."""
    SPELLNAME = "modify_delspecularprop"
    # block types stripped by the _SpellDelBranchClasses machinery
    BRANCH_CLASSES_TO_BE_DELETED = (NifFormat.NiSpecularProperty,)
# identical to niftoaster.py modify_delbranches -x BSXFlags
# delete?
class SpellDelBSXFlags(_SpellDelBranchClasses):
    """Delete BSXFlags if any are present."""
    SPELLNAME = "modify_delbsxflags"
    # block types stripped by the _SpellDelBranchClasses machinery
    BRANCH_CLASSES_TO_BE_DELETED = (NifFormat.BSXFlags,)
# identical to niftoaster.py modify_delbranches -x NiStringExtraData
# delete?
class SpellDelStringExtraDatas(_SpellDelBranchClasses):
    """Delete NiStringExtraDatas if they are present."""
    SPELLNAME = "modify_delstringextradatas"
    # block types stripped by the _SpellDelBranchClasses machinery
    BRANCH_CLASSES_TO_BE_DELETED = (NifFormat.NiStringExtraData,)
class SpellDelSkinShapes(SpellDelBranches):
    """Delete any geometries with a material name of 'skin'"""
    SPELLNAME = "modify_delskinshapes"

    def is_branch_to_be_deleted(self, branch):
        """Tag geometry blocks whose material is named 'skin'."""
        if not isinstance(branch, NifFormat.NiTriBasedGeom):
            # do not delete anything else
            return False
        return any(
            isinstance(prop, NifFormat.NiMaterialProperty)
            and prop.name.lower() == "skin"
            for prop in branch.get_properties())

    def branchinspect(self, branch):
        # only inspect the NiAVObject branch
        return isinstance(branch, NifFormat.NiAVObject)
# identical to niftoaster.py modify_delbranches -x NiCollisionObject
# delete?
class SpellDelCollisionData(_SpellDelBranchClasses):
    """Deletes any Collision data present."""
    SPELLNAME = "modify_delcollision"
    # block types stripped by the _SpellDelBranchClasses machinery
    BRANCH_CLASSES_TO_BE_DELETED = (NifFormat.NiCollisionObject,)
# identical to niftoaster.py modify_delbranches -x NiTimeController
# delete?
class SpellDelAnimation(_SpellDelBranchClasses):
    """Deletes any animation data present."""
    SPELLNAME = "modify_delanimation"
    # block types stripped by the _SpellDelBranchClasses machinery
    BRANCH_CLASSES_TO_BE_DELETED = (NifFormat.NiTimeController,)
class SpellDisableParallax(NifSpell):
    """Disable parallax shader (for Oblivion, but may work on other nifs too).
    """
    SPELLNAME = "modify_disableparallax"
    READONLY = False

    def datainspect(self):
        # XXX should we check that the nif is Oblivion version?
        # only run the spell if there are textures
        return self.inspectblocktype(NifFormat.NiTexturingProperty)

    def branchinspect(self, branch):
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiTexturingProperty))

    def branchentry(self, branch):
        if not isinstance(branch, NifFormat.NiTexturingProperty):
            # not a texturing property: keep recursing
            return True
        # apply mode 4 marks the parallax shader as active
        if branch.apply_mode == 4:
            self.toaster.msg("disabling parallax shader")
            branch.apply_mode = 2
            self.changed = True
        # texturing properties have no children worth visiting
        return False
class SpellAddStencilProperty(NifSpell):
    """Adds a NiStencilProperty to each geometry if it is not present."""
    SPELLNAME = "modify_addstencilprop"
    READONLY = False

    def datainspect(self):
        return self.inspectblocktype(NifFormat.NiTriBasedGeom)

    def branchinspect(self, branch):
        # only inspect the NiAVObject branch
        return isinstance(branch, NifFormat.NiAVObject)

    def branchentry(self, branch):
        if not isinstance(branch, NifFormat.NiTriBasedGeom):
            # keep recursing until we reach geometry
            return True
        # leave geometry that already carries a stencil property alone
        if any(isinstance(prop, NifFormat.NiStencilProperty)
               for prop in branch.get_properties()):
            return False
        self.toaster.msg("adding NiStencilProperty")
        branch.add_property(NifFormat.NiStencilProperty())
        self.changed = True
        # no geometry children, no need to recurse further
        return False
# note: this should go into the optimize module
# but we have to put it here to avoid circular dependencies
class SpellCleanFarNif(
    pyffi.spells.SpellGroupParallel(
        SpellDelVertexColorProperty,
        SpellDelAlphaProperty,
        SpellDelSpecularProperty,
        SpellDelBSXFlags,
        SpellDelStringExtraDatas,
        pyffi.spells.nif.fix.SpellDelTangentSpace,
        SpellDelCollisionData,
        SpellDelAnimation,
        SpellDisableParallax)):
    """Spell to clean _far type nifs (for even more optimizations,
    combine this with the optimize spell).
    """
    SPELLNAME = "modify_cleanfarnif"
    # only apply spell on _far files
    def datainspect(self):
        # _far nifs are distant-view models, identified by file name suffix
        return self.stream.name.endswith('_far.nif')
# TODO: implement via modify_delbranches?
# this is like SpellCleanFarNif but with changing the texture path
# and optimizing the geometry
class SpellMakeFarNif(
    pyffi.spells.SpellGroupParallel(
        SpellDelVertexColorProperty,
        SpellDelAlphaProperty,
        SpellDelSpecularProperty,
        SpellDelBSXFlags,
        SpellDelStringExtraDatas,
        pyffi.spells.nif.fix.SpellDelTangentSpace,
        SpellDelCollisionData,
        SpellDelAnimation,
        SpellDisableParallax,
        SpellLowResTexturePath)):
    #TODO: implement vert decreaser.
    """Spell to make _far type nifs (for even more optimizations,
    combine this with the optimize spell).
    """
    SPELLNAME = "modify_makefarnif"
class SpellMakeSkinlessNif(
    pyffi.spells.SpellGroupSeries(
        pyffi.spells.SpellGroupParallel(
            SpellDelSkinShapes,
            SpellAddStencilProperty)
    )):
    """Spell to make fleshless CMR (Custom Model Races)
    clothing/armour type nifs.

    Deletes 'skin'-material geometry and adds stencil properties to the
    remaining shapes in a single parallel pass.
    """
    SPELLNAME = "modify_makeskinlessnif"
class SpellSubstituteStringPalette(
    pyffi.spells.nif.fix.SpellCleanStringPalette):
    """Substitute strings in a string palette."""
    SPELLNAME = "modify_substitutestringpalette"

    @classmethod
    def toastentry(cls, toaster):
        """Parse the -a argument of the form <sep>pattern<sep>replacement
        (its first character is the separator).
        """
        arg = toaster.options["arg"]
        if not arg:
            # missing arg
            toaster.logger.warn(
                "must specify regular expression and substitution as argument "
                "(e.g. -a /Bip01/Bip02) to apply spell")
            return False
        dummy, pattern, replacement = arg.split(arg[0])
        toaster.sub = _as_bytes(replacement)
        toaster.regex = re.compile(_as_bytes(pattern))
        return True

    def substitute(self, old_string):
        """Returns modified string, and reports if string was modified.
        """
        if not old_string:
            # leave empty strings be
            return old_string
        replaced = self.toaster.regex.sub(self.toaster.sub, old_string)
        if replaced != old_string:
            self.changed = True
            self.toaster.msg("%s -> %s" % (old_string, replaced))
        return replaced
class SpellChangeBonePriorities(NifSpell):
    """Changes controlled block priorities based on controlled block name."""
    SPELLNAME = "modify_bonepriorities"
    READONLY = False
    @classmethod
    def toastentry(cls, toaster):
        if not toaster.options["arg"]:
            toaster.logger.warn(
                "must specify bone(s) and priority(ies) as argument "
                "(e.g. -a 'bip01:50|bip01 spine:10') to apply spell "
                "make sure all bone names in lowercase")
            return False
        else:
            # parse "name:priority|name:priority|..." into a dict keyed
            # on lowercased bone name
            toaster.bone_priorities = dict(
                (name.lower(), int(priority))
                for (name, priority) in (
                    namepriority.split(":")
                    for namepriority in toaster.options["arg"].split("|")))
            return True
    def datainspect(self):
        # returns only if nif/kf contains NiSequence
        return self.inspectblocktype(NifFormat.NiSequence)
    def branchinspect(self, branch):
        # inspect the NiAVObject and NiSequence branches
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiControllerManager,
                                   NifFormat.NiSequence))
    def branchentry(self, branch):
        if isinstance(branch, NifFormat.NiSequence):
            for controlled_block in branch.controlled_blocks:
                try:
                    controlled_block.priority = self.toaster.bone_priorities[
                        controlled_block.get_node_name().lower()]
                except KeyError:
                    # node name not in bone priority list
                    continue
                self.changed = True
                self.toaster.msg("%s priority changed to %d" %
                                 (controlled_block.get_node_name(),
                                  controlled_block.priority))
        return True
class SpellChangeAllBonePriorities(SpellChangeBonePriorities):
    """Changes all controlled block priorities to supplied argument."""
    SPELLNAME = "modify_allbonepriorities"

    @classmethod
    def toastentry(cls, toaster):
        """Read the single priority value from the -a option."""
        arg = toaster.options["arg"]
        if not arg:
            toaster.logger.warn(
                "must specify priority as argument (e.g. -a 20)")
            return False
        toaster.bone_priority = int(arg)
        return True

    def branchentry(self, branch):
        if isinstance(branch, NifFormat.NiSequence):
            target = self.toaster.bone_priority
            for controlled_block in branch.controlled_blocks:
                if controlled_block.priority == target:
                    # already at the requested value, just report it
                    self.toaster.msg("%s priority is already %d" %
                                     (controlled_block.get_node_name(),
                                      controlled_block.priority))
                    continue
                controlled_block.priority = target
                self.changed = True
                self.toaster.msg("%s priority changed to %d" %
                                 (controlled_block.get_node_name(),
                                  controlled_block.priority))
        return True
# should go in dump, but is the counterpart of modify_setbonepriorities
# therefore maintained here
class SpellGetBonePriorities(NifSpell):
    """For each file.nif, dump bone priorites to
    file_bonepriorities.txt.
    """
    SPELLNAME = "modify_getbonepriorities"
    def datainspect(self):
        # continue only if nif/kf contains NiSequence
        return self.inspectblocktype(NifFormat.NiSequence)
    def dataentry(self):
        # maps sequence name -> {bone name: priority}
        self.bonepriorities = {}
        return True
    def branchinspect(self, branch):
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiControllerManager,
                                   NifFormat.NiSequence))
    def branchentry(self, branch):
        """Collect the per-bone priorities of every NiSequence."""
        if isinstance(branch, NifFormat.NiSequence):
            bonepriorities = {}
            for controlled_block in branch.controlled_blocks:
                name = controlled_block.get_node_name().decode()
                priority = controlled_block.priority
                if name not in bonepriorities:
                    bonepriorities[name] = priority
                    #self.toaster.msg("noted %r priority %i" % (name, priority))
                elif bonepriorities[name] != priority:
                    self.toaster.logger.warn(
                        "multiple priorities for %r" % name)
                    self.toaster.logger.warn(
                        "(using %i, ignoring %i)"
                        # bug fix: the first recorded value lives in the
                        # local bonepriorities dict; self.bonepriorities
                        # is keyed by sequence name and raised KeyError
                        % (bonepriorities[name], priority))
            sequence = branch.name.decode()
            if sequence not in self.bonepriorities:
                self.bonepriorities[sequence] = bonepriorities
            else:
                self.toaster.logger.warn(
                    "multiple sequences named %r,"
                    " only the first will be recorded" % sequence)
        return True
    @staticmethod
    def key(value):
        """Strip ' R ' and ' L ' from name so they occur together in list."""
        name, priority = value
        return re.sub("( R )|( L )", "", name)
    def dataexit(self):
        # write the collected priorities next to the nif/kf file as an
        # ini-style text file with CRLF line endings
        filename, ext = os.path.splitext(self.stream.name)
        filename = filename + "_bonepriorities.txt"
        self.toaster.msg("writing %s" % filename)
        with codecs.open(filename, "wb", encoding="ascii") as stream:
            for sequence, bonepriorities in self.bonepriorities.items():
                print("[%s]" % sequence, file=stream, end="\r\n")
                for name, priority in sorted(bonepriorities.items(),
                                             key=self.key):
                    print("%s=%i" % (name, priority), file=stream, end="\r\n")
        self.bonepriorities = {}
class SpellSetBonePriorities(NifSpell):
    """For each file.nif, restore bone priorites from
    file_bonepriorities.txt.
    """
    SPELLNAME = "modify_setbonepriorities"
    READONLY = False
    def datainspect(self):
        # returns only if nif/kf contains NiSequence
        return self.inspectblocktype(NifFormat.NiSequence)
    def dataentry(self):
        """Parse file_bonepriorities.txt; skip the file if it is absent.

        The file is ini-style: "[sequence]" section headers followed by
        "bone=priority" lines (as written by modify_getbonepriorities).
        """
        filename, ext = os.path.splitext(self.stream.name)
        filename = filename + "_bonepriorities.txt"
        if not os.path.exists(filename):
            self.toaster.msg("%s not found, skipping" % filename)
            return False
        self.toaster.msg("reading %s" % filename)
        with codecs.open(filename, "rb", encoding="ascii") as stream:
            self.bonepriorities = {}  # priorities for all sequences
            sequence = ""  # current sequence
            bonepriorities = {}  # priorities for current sequence
            for line in stream:
                line = line.rstrip('\r\n')
                m = re.match("\\[(.*)\\]$", line)
                if m:
                    # new [sequence] header: store the finished section
                    if sequence:
                        self.bonepriorities[sequence] = bonepriorities
                    sequence = m.group(1)
                    bonepriorities = {}
                else:
                    m = re.match("(.*)=([0-9]+)$", line)
                    if not m:
                        self.toaster.logger.warn("syntax error in %r" % line)
                        # bug fix: skip the malformed line; previously the
                        # code fell through and crashed on m.group(1)
                        # with m being None
                        continue
                    bonepriorities[m.group(1)] = int(m.group(2))
            # store the last section, which has no header following it
            if sequence:
                self.bonepriorities[sequence] = bonepriorities
        return True
    def branchinspect(self, branch):
        # inspect the NiAVObject and NiSequence branches
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiControllerManager,
                                   NifFormat.NiSequence))
    def branchentry(self, branch):
        """Apply the parsed priorities to each NiSequence."""
        if isinstance(branch, NifFormat.NiSequence):
            sequence = branch.name.decode()
            if sequence not in self.bonepriorities:
                self.toaster.logger.warn(
                    "sequence %r not listed, skipped" % sequence)
                return False
            bonepriorities = self.bonepriorities[sequence]
            for controlled_block in branch.controlled_blocks:
                name = controlled_block.get_node_name().decode()
                if name in bonepriorities:
                    priority = bonepriorities[name]
                    if priority != controlled_block.priority:
                        self.toaster.msg("setting %r priority to %i (was %i)"
                                         % (name, priority,
                                            controlled_block.priority))
                        controlled_block.priority = priority
                        self.changed = True
                    else:
                        self.toaster.msg("%r priority already at %i"
                                         % (name, priority))
                else:
                    self.toaster.logger.warn(
                        "%r in nif file but not in priority file" % name)
        return True
class SpellSetInterpolatorTransRotScale(NifSpell):
    """Changes specified bone(s) translations/rotations in their
    NiTransformInterpolator.
    """
    SPELLNAME = "modify_interpolatortransrotscale"
    READONLY = False
    @classmethod
    def toastentry(cls, toaster):
        if not toaster.options["arg"]:
            toaster.logger.warn(
                "must specify bone(s), translation and rotation for each"
                " bone as argument (e.g."
                " -a 'bip01:1,2,3;0,0,0,1;1|bip01 spine2:0,0,0;1,0,0,0.5;1')"
                " to apply spell; make sure all bone names are lowercase,"
                " first three numbers being translation,"
                " next three being rotation,"
                " last being scale;"
                " enter X to leave existing value for that value")
            return False
        else:
            def _float(x):
                # "X" on the command line means "keep the existing value";
                # it becomes None and is skipped in branchentry
                if x == "X":
                    return None
                else:
                    return float(x)
            # parse "name:tx,ty,tz;qx,qy,qz,qw;s|..." into
            # {lowercase name: ([tx, ty, tz], [qx, qy, qz, qw], s)}
            toaster.interp_transforms = dict(
                (name.lower(), ([_float(x) for x in trans.split(",")],
                                [_float(x) for x in rot.split(",")],
                                _float(scale)))
                for (name, (trans, rot, scale)) in (
                    (name, transrotscale.split(";"))
                    for (name, transrotscale) in (
                        name_transrotscale.split(":")
                        for name_transrotscale
                        in toaster.options["arg"].split("|"))))
            return True
    def datainspect(self):
        # returns only if nif/kf contains NiSequence
        return self.inspectblocktype(NifFormat.NiSequence)
    def branchinspect(self, branch):
        # inspect the NiAVObject and NiSequence branches
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiSequence))
    def branchentry(self, branch):
        if isinstance(branch, NifFormat.NiSequence):
            for controlled_block in branch.controlled_blocks:
                try:
                    (transx, transy, transz), (quatx, quaty, quatz, quatw), scale = self.toaster.interp_transforms[controlled_block.get_node_name().lower()]
                except KeyError:
                    # node name not in change list
                    continue
                interp = controlled_block.interpolator
                # None entries ("X" on the command line) leave the
                # corresponding interpolator value untouched
                if transx is not None:
                    interp.translation.x = transx
                if transy is not None:
                    interp.translation.y = transy
                if transz is not None:
                    interp.translation.z = transz
                if quatx is not None:
                    interp.rotation.x = quatx
                if quaty is not None:
                    interp.rotation.y = quaty
                if quatz is not None:
                    interp.rotation.z = quatz
                if quatw is not None:
                    interp.rotation.w = quatw
                if scale is not None:
                    interp.scale = scale
                self.changed = True
                self.toaster.msg(
                    "%s rotated/translated/scaled as per argument"
                    % (controlled_block.get_node_name()))
        return True
class SpellDelInterpolatorTransformData(NifSpell):
    """Deletes the specified bone(s) NiTransformData(s)."""
    SPELLNAME = "modify_delinterpolatortransformdata"
    READONLY = False
    @classmethod
    def toastentry(cls, toaster):
        if not toaster.options["arg"]:
            toaster.logger.warn(
                "must specify bone name(s) as argument "
                "(e.g. -a 'bip01|bip01 pelvis') to apply spell "
                "make sure all bone name(s) in lowercase")
            return False
        else:
            # '|'-separated list of lowercase bone names whose transform
            # data will be stripped
            toaster.change_blocks = toaster.options["arg"].split('|')
            return True
    def datainspect(self):
        # returns only if nif/kf contains NiSequence
        return self.inspectblocktype(NifFormat.NiSequence)
    def branchinspect(self, branch):
        # inspect the NiAVObject and NiSequence branches
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiSequence))
    def branchentry(self, branch):
        if isinstance(branch, NifFormat.NiSequence):
            for controlled_block in branch.controlled_blocks:
                if controlled_block.get_node_name().lower() in self.toaster.change_blocks:
                    # unlink the transform data block from the whole tree
                    self.data.replace_global_node(controlled_block.interpolator.data, None)
                    self.toaster.msg("NiTransformData removed from interpolator for %s" % (controlled_block.get_node_name()))
                    self.changed = True
        return True
class SpellCollisionToMopp(NifSpell):
    """Transforms non-mopp triangle collisions to the more efficient mopps."""
    SPELLNAME = "modify_collisiontomopp"
    READONLY = False
    def datainspect(self):
        return self.inspectblocktype(NifFormat.bhkRigidBody)
    def branchinspect(self, branch):
        # only inspect the NiAVObject branch
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.bhkCollisionObject,
                                   NifFormat.bhkRigidBody))
    def branchentry(self, branch):
        if isinstance(branch, NifFormat.bhkRigidBody):
            if isinstance(branch.shape, (NifFormat.bhkNiTriStripsShape,
                                         NifFormat.bhkPackedNiTriStripsShape)):
                # wrap the triangle shape in a new mopp bounding volume tree
                colmopp = NifFormat.bhkMoppBvTreeShape()
                colmopp.material = branch.shape.material
                # NOTE(review): magic mopp header bytes — presumably copied
                # from stock game nifs; confirm before changing
                colmopp.unknown_8_bytes[0] = 160
                colmopp.unknown_8_bytes[1] = 13
                colmopp.unknown_8_bytes[2] = 75
                colmopp.unknown_8_bytes[3] = 1
                colmopp.unknown_8_bytes[4] = 192
                colmopp.unknown_8_bytes[5] = 207
                colmopp.unknown_8_bytes[6] = 144
                colmopp.unknown_8_bytes[7] = 11
                colmopp.unknown_float = 1.0
                # the mopp tree needs a packed shape: convert strips first
                if isinstance(branch.shape, NifFormat.bhkNiTriStripsShape):
                    branch.shape = branch.shape.get_interchangeable_packed_shape()
                colmopp.shape = branch.shape
                branch.shape = colmopp
                self.changed = True
                # generate the actual mopp code for the new tree
                branch.shape.update_mopp()
                self.toaster.msg("collision set to MOPP")
            # Don't need to recurse further
            return False
        else:
            # recurse further
            return True
class SpellMirrorAnimation(NifSpell):
    """Mirrors the animation by switching bones and mirroring their x values.
    Only useable on creature/character animations (well any animations
    as long as they have bones in the form of bip01/2 L ...).
    """
    SPELLNAME = "modify_mirroranimation"
    READONLY = False
    def datainspect(self):
        # returns more than needed but easiest way to ensure it catches all
        # types of animations
        return True
    def dataentry(self):
        # make list of used bones
        # maps lowercase left/right bone name -> its controlled block data,
        # so branchentry can swap left and right assignments
        self.old_bone_data = {}
        for branch in self.data.get_global_iterator():
            if isinstance(branch, NifFormat.NiControllerSequence):
                for block in branch.controlled_blocks:
                    name = block.get_node_name().lower()
                    if ' r ' in name or ' l ' in name:
                        self.old_bone_data[name] = [block.interpolator, block.controller, block.priority, block.string_palette, block.node_name_offset, block.controller_type_offset]
        if self.old_bone_data:
            return True
        # NOTE(review): implicitly returns None (falsy) when no left/right
        # bones were found, which skips the file — presumably intentional
    def branchinspect(self, branch):
        # inspect the NiAVObject branch, and NiControllerSequence
        # branch (for kf files)
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiTimeController,
                                   NifFormat.NiInterpolator,
                                   NifFormat.NiControllerManager,
                                   NifFormat.NiControllerSequence))
    def branchentry(self, branch):
        old_bone_data = self.old_bone_data
        if isinstance(branch, NifFormat.NiControllerSequence):
            for block in branch.controlled_blocks:
                node_name = block.get_node_name().lower()
                # swap left and right bone names
                if ' l ' in node_name: node_name = node_name.replace(' l ', ' r ')
                elif ' r ' in node_name: node_name = node_name.replace(' r ', ' l ')
                if node_name in old_bone_data:
                    self.changed = True
                    block.interpolator, block.controller, block.priority, block.string_palette, block.node_name_offset, block.controller_type_offset = old_bone_data[node_name]
                # and then reverse x movements (since otherwise the movement of f.e. an arm towards the center of the body will be still in the same direction but away from the body
                if not block.interpolator: continue
                ip = block.interpolator
                ip.translation.x = -ip.translation.x
                ip.rotation.x = -ip.rotation.x
                if ip.data:
                    data = ip.data
                    if data.translations.num_keys:
                        for key in data.translations.keys:
                            key.value.x = -key.value.x
                    if data.rotation_type == 4:
                        # rotation type 4: only the Y curve flips sign
                        if data.xyz_rotations[1].num_keys != 0:
                            for key in data.xyz_rotations[1].keys:
                                key.value = -key.value
                    elif data.num_rotation_keys != 0:
                        for key in data.quaternion_keys:
                            key.value.x = -key.value.x
        else:
            # recurse further
            return True
| {
"content_hash": "b7449ae77d4c78d49e4bd0f3481e4be7",
"timestamp": "",
"source": "github",
"line_count": 1272,
"max_line_length": 185,
"avg_line_length": 38.2185534591195,
"alnum_prop": 0.5918048298843954,
"repo_name": "griest024/PokyrimTools",
"id": "9ca3d8357f4af1dcfa4750d5721701e6704fb3b2",
"size": "48614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyffi-develop/pyffi/spells/nif/modify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1205978"
},
{
"name": "C++",
"bytes": "6318739"
},
{
"name": "CMake",
"bytes": "19319"
},
{
"name": "CSS",
"bytes": "1542"
},
{
"name": "Groff",
"bytes": "2506"
},
{
"name": "HTML",
"bytes": "3154887"
},
{
"name": "Inno Setup",
"bytes": "45620"
},
{
"name": "Java",
"bytes": "129878"
},
{
"name": "Makefile",
"bytes": "18242"
},
{
"name": "NSIS",
"bytes": "29228"
},
{
"name": "Objective-C",
"bytes": "9061"
},
{
"name": "Python",
"bytes": "2406969"
},
{
"name": "Shell",
"bytes": "41141"
}
],
"symlink_target": ""
} |
"""
sphinx.errors
~~~~~~~~~~~~~
Contains SphinxError and a few subclasses (in an extra module to avoid
circular import problems).
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import traceback
class SphinxError(Exception):
    """
    Base class for Sphinx errors that are shown to the user in a nicer
    way than normal exceptions.
    """
    # Human-readable category used when presenting the error to the user;
    # subclasses override this.
    category = 'Sphinx error'
class SphinxWarning(SphinxError):
    """Raised for warnings if warnings are treated as errors."""
    category = 'Warning, treated as error'
class ExtensionError(SphinxError):
    """Raised if something's wrong with the configuration."""
    category = 'Extension error'
    def __init__(self, message, orig_exc=None):
        """Store the message and, optionally, the original exception that
        triggered this error."""
        SphinxError.__init__(self, message)
        # Store the message explicitly: __repr__/__str__ read self.message,
        # but BaseException.message was deprecated in Python 2.6 and removed
        # in Python 3, so relying on the inherited attribute breaks there.
        self.message = message
        self.orig_exc = orig_exc
    def __repr__(self):
        if self.orig_exc:
            return '%s(%r, %r)' % (self.__class__.__name__,
                                   self.message, self.orig_exc)
        return '%s(%r)' % (self.__class__.__name__, self.message)
    def __str__(self):
        # Append the original exception, if any, to the normal message.
        parent_str = SphinxError.__str__(self)
        if self.orig_exc:
            return '%s (exception: %s)' % (parent_str, self.orig_exc)
        return parent_str
class ConfigError(SphinxError):
    """Raised for errors in the configuration (conf.py)."""
    category = 'Configuration error'
class ThemeError(SphinxError):
    """Raised for errors related to HTML themes."""
    category = 'Theme error'
class VersionRequirementError(SphinxError):
    """Raised when the Sphinx version requirement is not satisfied."""
    category = 'Sphinx version error'
class PycodeError(Exception):
    """Raised on failures while introspecting Python source code.

    ``args[0]`` is the message; an optional ``args[1]`` carries the
    underlying exception.
    """
    def __str__(self):
        message = self.args[0]
        if len(self.args) > 1:
            message = '%s (exception was: %r)' % (message, self.args[1])
        return message
class SphinxParallelError(Exception):
    """Wraps an exception that was raised in a parallel worker process.

    Keeps the original exception and its formatted traceback text.
    """
    def __init__(self, orig_exc, traceback):
        self.orig_exc = orig_exc
        self.traceback = traceback
    def __str__(self):
        # Render only the "ExceptionType: message" line of the original.
        lines = traceback.format_exception_only(
            type(self.orig_exc), self.orig_exc)
        return lines[0].strip()
| {
"content_hash": "d266c2ead2773a91205041f3f105e9f6",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 74,
"avg_line_length": 26.236842105263158,
"alnum_prop": 0.6093279839518556,
"repo_name": "neerajvashistha/pa-dude",
"id": "8d695c19001c9e8f6678c2b5969007a58525634d",
"size": "2018",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/sphinx/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "359307"
},
{
"name": "C++",
"bytes": "5695"
},
{
"name": "CSS",
"bytes": "114504"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "HTML",
"bytes": "216904"
},
{
"name": "JavaScript",
"bytes": "1323680"
},
{
"name": "Makefile",
"bytes": "2299"
},
{
"name": "Python",
"bytes": "31341230"
},
{
"name": "Self",
"bytes": "40307"
},
{
"name": "Shell",
"bytes": "5427"
},
{
"name": "TeX",
"bytes": "96790"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import logging
import os
import gobject
import pygst
pygst.require('0.10')
import gst # noqa
import gst.pbutils # noqa
import pykka
from mopidy import exceptions
from mopidy.audio import playlists, utils
from mopidy.audio.constants import PlaybackState
from mopidy.audio.listener import AudioListener
from mopidy.internal import deprecation, process
logger = logging.getLogger(__name__)
# This logger is only meant for debug logging of low level gstreamer info such
# as callbacks, event, messages and direct interaction with GStreamer such as
# set_state on a pipeline.
gst_logger = logging.getLogger('mopidy.audio.gst')
# Register Mopidy's own typefinders and playlist-parsing elements with
# GStreamer before any pipeline is built.
playlists.register_typefinders()
playlists.register_elements()
# Map raw GStreamer pipeline states to Mopidy PlaybackState values.
# STATE_READY has no mapping on purpose: it is treated as GStreamer-internal
# and filtered out in _Handler.on_playbin_state_changed().
_GST_STATE_MAPPING = {
    gst.STATE_PLAYING: PlaybackState.PLAYING,
    gst.STATE_PAUSED: PlaybackState.PAUSED,
    gst.STATE_NULL: PlaybackState.STOPPED}
class _Signals(object):
"""Helper for tracking gobject signal registrations"""
def __init__(self):
self._ids = {}
def connect(self, element, event, func, *args):
"""Connect a function + args to signal event on an element.
Each event may only be handled by one callback in this implementation.
"""
assert (element, event) not in self._ids
self._ids[(element, event)] = element.connect(event, func, *args)
def disconnect(self, element, event):
"""Disconnect whatever handler we have for and element+event pair.
Does nothing it the handler has already been removed.
"""
signal_id = self._ids.pop((element, event), None)
if signal_id is not None:
element.disconnect(signal_id)
def clear(self):
"""Clear all registered signal handlers."""
for element, event in self._ids.keys():
element.disconnect(self._ids.pop((element, event)))
# TODO: expose this as a property on audio?
class _Appsrc(object):
    """Helper class for dealing with appsrc based playback."""
    def __init__(self):
        self._signals = _Signals()
        self.reset()
    def reset(self):
        """Reset the helper.
        Should be called whenever the source changes and we are not setting up
        a new appsrc.
        """
        # Preparing with all None values clears the caps and callbacks.
        self.prepare(None, None, None, None)
    def prepare(self, caps, need_data, enough_data, seek_data):
        """Store info we will need when the appsrc element gets installed."""
        self._signals.clear()
        self._source = None
        self._caps = caps
        self._need_data_callback = need_data
        self._seek_data_callback = seek_data
        self._enough_data_callback = enough_data
    def configure(self, source):
        """Configure the supplied source for use.
        Should be called whenever we get a new appsrc.
        """
        source.set_property('caps', self._caps)
        source.set_property('format', b'time')
        source.set_property('stream-type', b'seekable')
        source.set_property('max-bytes', 1 << 20)  # 1MB
        source.set_property('min-percent', 50)
        if self._need_data_callback:
            self._signals.connect(source, 'need-data', self._on_signal,
                                  self._need_data_callback)
        if self._seek_data_callback:
            self._signals.connect(source, 'seek-data', self._on_signal,
                                  self._seek_data_callback)
        if self._enough_data_callback:
            # The extra None keeps _on_signal's (element, clocktime, func)
            # argument order uniform for callbacks without a time argument.
            self._signals.connect(source, 'enough-data', self._on_signal, None,
                                  self._enough_data_callback)
        self._source = source
    def push(self, buffer_):
        # Push a buffer into appsrc; None signals end-of-stream.
        # Returns True when appsrc accepted the data/event.
        if self._source is None:
            return False
        if buffer_ is None:
            gst_logger.debug('Sending appsrc end-of-stream event.')
            return self._source.emit('end-of-stream') == gst.FLOW_OK
        else:
            return self._source.emit('push-buffer', buffer_) == gst.FLOW_OK
    def _on_signal(self, element, clocktime, func):
        # This shim is used to ensure we always return true, and also handles
        # that not all the callbacks have a time argument.
        if clocktime is None:
            func()
        else:
            func(utils.clocktime_to_millisecond(clocktime))
        return True
# TODO: expose this as a property on audio when #790 gets further along.
class _Outputs(gst.Bin):
    # A bin that fans incoming audio out to every configured output through
    # a single tee element; its ghosted 'sink' pad is the bin's input.
    def __init__(self):
        gst.Bin.__init__(self, 'outputs')
        self._tee = gst.element_factory_make('tee')
        self.add(self._tee)
        ghost_pad = gst.GhostPad('sink', self._tee.get_pad('sink'))
        self.add_pad(ghost_pad)
        # Add an always connected fakesink which respects the clock so the tee
        # doesn't fail even if we don't have any outputs.
        fakesink = gst.element_factory_make('fakesink')
        fakesink.set_property('sync', True)
        self._add(fakesink)
    def add_output(self, description):
        # Parse a GStreamer bin description string and attach it as one more
        # output branch; raises exceptions.AudioException on parse failure.
        # XXX This only works for pipelines not in use until #790 gets done.
        try:
            output = gst.parse_bin_from_description(
                description, ghost_unconnected_pads=True)
        except gobject.GError as ex:
            logger.error(
                'Failed to create audio output "%s": %s', description, ex)
            raise exceptions.AudioException(bytes(ex))
        self._add(output)
        logger.info('Audio output set to "%s"', description)
    def _add(self, element):
        # Attach element to the tee with a queue in front of it.
        queue = gst.element_factory_make('queue')
        self.add(element)
        self.add(queue)
        queue.link(element)
        self._tee.link(queue)
class SoftwareMixer(object):
    """Bridges a Mopidy mixer to a GStreamer ``volume`` element.

    Volume is exposed as an integer percentage (0-100) while the element
    stores it as a float fraction.
    """
    pykka_traversable = True
    def __init__(self, mixer):
        self._mixer = mixer
        self._element = None
        self._last_volume = None
        self._last_mute = None
        self._signals = _Signals()
    def setup(self, element, mixer_ref):
        """Attach the volume element and hand the actor ref to the mixer."""
        self._element = element
        self._mixer.setup(mixer_ref)
    def teardown(self):
        """Drop signal handlers and let the mixer clean up."""
        self._signals.clear()
        self._mixer.teardown()
    def get_volume(self):
        """Return the current volume as an integer percentage."""
        fraction = self._element.get_property('volume')
        return int(round(fraction * 100))
    def set_volume(self, volume):
        """Apply *volume* (percent) to the element and notify the mixer."""
        self._element.set_property('volume', volume / 100.0)
        self._mixer.trigger_volume_changed(self.get_volume())
    def get_mute(self):
        """Return the current mute flag from the element."""
        return self._element.get_property('mute')
    def set_mute(self, mute):
        """Coerce *mute* to bool, apply it, and notify the mixer."""
        muted = bool(mute)
        self._element.set_property('mute', muted)
        self._mixer.trigger_mute_changed(self.get_mute())
class _Handler(object):
    # Translates GStreamer bus messages and pad events into state updates on
    # the owning Audio actor and AudioListener notifications.
    def __init__(self, audio):
        self._audio = audio
        self._element = None
        self._pad = None
        self._message_handler_id = None
        self._event_handler_id = None
    def setup_message_handling(self, element):
        # Watch the element's bus and route messages to on_message().
        self._element = element
        bus = element.get_bus()
        bus.add_signal_watch()
        self._message_handler_id = bus.connect('message', self.on_message)
    def setup_event_handling(self, pad):
        # Probe events flowing through pad and route them to on_event().
        self._pad = pad
        self._event_handler_id = pad.add_event_probe(self.on_event)
    def teardown_message_handling(self):
        bus = self._element.get_bus()
        bus.remove_signal_watch()
        bus.disconnect(self._message_handler_id)
        self._message_handler_id = None
    def teardown_event_handling(self):
        self._pad.remove_event_probe(self._event_handler_id)
        self._event_handler_id = None
    def on_message(self, bus, msg):
        # Dispatch on GStreamer message type; state-changed messages are only
        # handled when they come from the watched element itself.
        if msg.type == gst.MESSAGE_STATE_CHANGED and msg.src == self._element:
            self.on_playbin_state_changed(*msg.parse_state_changed())
        elif msg.type == gst.MESSAGE_BUFFERING:
            self.on_buffering(msg.parse_buffering(), msg.structure)
        elif msg.type == gst.MESSAGE_EOS:
            self.on_end_of_stream()
        elif msg.type == gst.MESSAGE_ERROR:
            self.on_error(*msg.parse_error())
        elif msg.type == gst.MESSAGE_WARNING:
            self.on_warning(*msg.parse_warning())
        elif msg.type == gst.MESSAGE_ASYNC_DONE:
            self.on_async_done()
        elif msg.type == gst.MESSAGE_TAG:
            self.on_tag(msg.parse_tag())
        elif msg.type == gst.MESSAGE_ELEMENT:
            if gst.pbutils.is_missing_plugin_message(msg):
                self.on_missing_plugin(msg)
    def on_event(self, pad, event):
        if event.type == gst.EVENT_NEWSEGMENT:
            self.on_new_segment(*event.parse_new_segment())
        elif event.type == gst.EVENT_SINK_MESSAGE:
            # Handle stream changed messages when they reach our output bin.
            # If we listen for it on the bus we get one per tee branch.
            msg = event.parse_sink_message()
            if msg.structure.has_name('playbin2-stream-changed'):
                self.on_stream_changed(msg.structure['uri'])
        return True
    def on_playbin_state_changed(self, old_state, new_state, pending_state):
        gst_logger.debug('Got state-changed message: old=%s new=%s pending=%s',
                         old_state.value_name, new_state.value_name,
                         pending_state.value_name)
        if new_state == gst.STATE_READY and pending_state == gst.STATE_NULL:
            # XXX: We're not called on the last state change when going down to
            # NULL, so we rewrite the second to last call to get the expected
            # behavior.
            new_state = gst.STATE_NULL
            pending_state = gst.STATE_VOID_PENDING
        if pending_state != gst.STATE_VOID_PENDING:
            return  # Ignore intermediate state changes
        if new_state == gst.STATE_READY:
            return  # Ignore READY state as it's GStreamer specific
        # Translate to Mopidy PlaybackState values and record on the actor.
        new_state = _GST_STATE_MAPPING[new_state]
        old_state, self._audio.state = self._audio.state, new_state
        target_state = _GST_STATE_MAPPING[self._audio._target_state]
        if target_state == new_state:
            # None signals to listeners that the target state was reached.
            target_state = None
        logger.debug('Audio event: state_changed(old_state=%s, new_state=%s, '
                     'target_state=%s)', old_state, new_state, target_state)
        AudioListener.send('state_changed', old_state=old_state,
                           new_state=new_state, target_state=target_state)
        if new_state == PlaybackState.STOPPED:
            logger.debug('Audio event: stream_changed(uri=None)')
            AudioListener.send('stream_changed', uri=None)
        if 'GST_DEBUG_DUMP_DOT_DIR' in os.environ:
            # Dump the pipeline graph for debugging when requested.
            gst.DEBUG_BIN_TO_DOT_FILE(
                self._audio._playbin, gst.DEBUG_GRAPH_SHOW_ALL, 'mopidy')
    def on_buffering(self, percent, structure=None):
        if structure and structure.has_field('buffering-mode'):
            if structure['buffering-mode'] == gst.BUFFERING_LIVE:
                return  # Live sources stall in paused.
        level = logging.getLevelName('TRACE')
        # Pause while the buffer is low; resume once it is full again.
        if percent < 10 and not self._audio._buffering:
            self._audio._playbin.set_state(gst.STATE_PAUSED)
            self._audio._buffering = True
            level = logging.DEBUG
        if percent == 100:
            self._audio._buffering = False
            if self._audio._target_state == gst.STATE_PLAYING:
                self._audio._playbin.set_state(gst.STATE_PLAYING)
            level = logging.DEBUG
        gst_logger.log(level, 'Got buffering message: percent=%d%%', percent)
    def on_end_of_stream(self):
        gst_logger.debug('Got end-of-stream message.')
        logger.debug('Audio event: reached_end_of_stream()')
        self._audio._tags = {}
        AudioListener.send('reached_end_of_stream')
    def on_error(self, error, debug):
        gst_logger.error(str(error).decode('utf-8'))
        if debug:
            gst_logger.debug(debug.decode('utf-8'))
        # TODO: is this needed?
        self._audio.stop_playback()
    def on_warning(self, error, debug):
        gst_logger.warning(str(error).decode('utf-8'))
        if debug:
            gst_logger.debug(debug.decode('utf-8'))
    def on_async_done(self):
        gst_logger.debug('Got async-done.')
    def on_tag(self, taglist):
        # Merge the new tags into the actor's tag state and notify listeners
        # which keys changed.
        tags = utils.convert_taglist(taglist)
        self._audio._tags.update(tags)
        logger.debug('Audio event: tags_changed(tags=%r)', tags.keys())
        AudioListener.send('tags_changed', tags=tags.keys())
    def on_missing_plugin(self, msg):
        desc = gst.pbutils.missing_plugin_message_get_description(msg)
        debug = gst.pbutils.missing_plugin_message_get_installer_detail(msg)
        gst_logger.debug('Got missing-plugin message: description:%s', desc)
        logger.warning('Could not find a %s to handle media.', desc)
        if gst.pbutils.install_plugins_supported():
            logger.info('You might be able to fix this by running: '
                        'gst-installer "%s"', debug)
        # TODO: store the missing plugins installer info in a file so we can
        # can provide a 'mopidy install-missing-plugins' if the system has the
        # required helper installed?
    def on_new_segment(self, update, rate, format_, start, stop, position):
        gst_logger.debug('Got new-segment event: update=%s rate=%s format=%s '
                         'start=%s stop=%s position=%s', update, rate,
                         format_.value_name, start, stop, position)
        position_ms = position // gst.MSECOND
        logger.debug('Audio event: position_changed(position=%s)', position_ms)
        AudioListener.send('position_changed', position=position_ms)
    def on_stream_changed(self, uri):
        gst_logger.debug('Got stream-changed message: uri=%s', uri)
        logger.debug('Audio event: stream_changed(uri=%s)', uri)
        AudioListener.send('stream_changed', uri=uri)
# TODO: create a player class which replaces the actors internals
class Audio(pykka.ThreadingActor):
    """
    Audio output through `GStreamer <http://gstreamer.freedesktop.org/>`_.
    """
    #: The GStreamer state mapped to :class:`mopidy.audio.PlaybackState`
    state = PlaybackState.STOPPED
    #: The software mixing interface :class:`mopidy.audio.actor.SoftwareMixer`
    mixer = None
    def __init__(self, config, mixer):
        super(Audio, self).__init__()
        self._config = config
        # The raw GStreamer state most recently requested; the message
        # handler compares against it to detect intermediate transitions.
        self._target_state = gst.STATE_NULL
        self._buffering = False
        self._tags = {}
        self._playbin = None
        self._outputs = None
        self._about_to_finish_callback = None
        self._handler = _Handler(self)
        self._appsrc = _Appsrc()
        self._signals = _Signals()
        if mixer and self._config['audio']['mixer'] == 'software':
            self.mixer = SoftwareMixer(mixer)
    def on_start(self):
        # Pykka actor lifecycle hook: build the GStreamer pipeline when the
        # actor thread starts; a GError here is fatal for the process.
        try:
            self._setup_preferences()
            self._setup_playbin()
            self._setup_outputs()
            self._setup_audio_sink()
        except gobject.GError as ex:
            logger.exception(ex)
            process.exit_process()
    def on_stop(self):
        # Pykka actor lifecycle hook: tear the pipeline down on actor stop.
        self._teardown_mixer()
        self._teardown_playbin()
    def _setup_preferences(self):
        # TODO: move out of audio actor?
        # Fix for https://github.com/mopidy/mopidy/issues/604
        registry = gst.registry_get_default()
        jacksink = registry.find_feature(
            'jackaudiosink', gst.TYPE_ELEMENT_FACTORY)
        if jacksink:
            # Demote jackaudiosink so autoaudiosink won't pick it first.
            jacksink.set_rank(gst.RANK_SECONDARY)
    def _setup_playbin(self):
        # Create and configure the playbin2 element that drives playback.
        playbin = gst.element_factory_make('playbin2')
        playbin.set_property('flags', 2)  # GST_PLAY_FLAG_AUDIO
        # TODO: turn into config values...
        playbin.set_property('buffer-size', 5 << 20)  # 5MB
        playbin.set_property('buffer-duration', 5 * gst.SECOND)
        self._signals.connect(playbin, 'source-setup', self._on_source_setup)
        self._signals.connect(playbin, 'about-to-finish',
                              self._on_about_to_finish)
        self._playbin = playbin
        self._handler.setup_message_handling(playbin)
    def _teardown_playbin(self):
        # Undo everything _setup_playbin()/_setup_outputs() registered and
        # drop the pipeline to NULL.
        self._handler.teardown_message_handling()
        self._handler.teardown_event_handling()
        self._signals.disconnect(self._playbin, 'about-to-finish')
        self._signals.disconnect(self._playbin, 'source-setup')
        self._playbin.set_state(gst.STATE_NULL)
    def _setup_outputs(self):
        # We don't want to use outputs for regular testing, so just install
        # an unsynced fakesink when someone asks for a 'testoutput'.
        if self._config['audio']['output'] == 'testoutput':
            self._outputs = gst.element_factory_make('fakesink')
        else:
            self._outputs = _Outputs()
            try:
                self._outputs.add_output(self._config['audio']['output'])
            except exceptions.AudioException:
                process.exit_process()  # TODO: move this up the chain
        self._handler.setup_event_handling(self._outputs.get_pad('sink'))
    def _setup_audio_sink(self):
        # Assemble queue [-> volume] -> outputs into a bin and install it as
        # the playbin's audio sink.
        audio_sink = gst.Bin('audio-sink')
        # Queue element to buy us time between the about to finish event and
        # the actual switch, i.e. about to switch can block for longer thanks
        # to this queue.
        # TODO: make the min-max values a setting?
        queue = gst.element_factory_make('queue')
        queue.set_property('max-size-buffers', 0)
        queue.set_property('max-size-bytes', 0)
        queue.set_property('max-size-time', 3 * gst.SECOND)
        queue.set_property('min-threshold-time', 1 * gst.SECOND)
        audio_sink.add(queue)
        audio_sink.add(self._outputs)
        if self.mixer:
            # Software mixing goes through a volume element in the chain.
            volume = gst.element_factory_make('volume')
            audio_sink.add(volume)
            queue.link(volume)
            volume.link(self._outputs)
            self.mixer.setup(volume, self.actor_ref.proxy().mixer)
        else:
            queue.link(self._outputs)
        ghost_pad = gst.GhostPad('sink', queue.get_pad('sink'))
        audio_sink.add_pad(ghost_pad)
        self._playbin.set_property('audio-sink', audio_sink)
    def _teardown_mixer(self):
        if self.mixer:
            self.mixer.teardown()
    def _on_about_to_finish(self, element):
        # GStreamer signal handler; runs the gapless-playback callback, if
        # one was registered via set_about_to_finish_callback().
        gst_logger.debug('Got about-to-finish event.')
        if self._about_to_finish_callback:
            logger.debug('Running about to finish callback.')
            self._about_to_finish_callback()
    def _on_source_setup(self, element, source):
        # GStreamer signal handler; configures appsrc sources and applies
        # proxy settings to all other source types.
        gst_logger.debug('Got source-setup: element=%s', source)
        if source.get_factory().get_name() == 'appsrc':
            self._appsrc.configure(source)
        else:
            self._appsrc.reset()
        utils.setup_proxy(source, self._config['proxy'])
    def set_uri(self, uri):
        """
        Set URI of audio to be played.
        You *MUST* call :meth:`prepare_change` before calling this method.
        :param uri: the URI to play
        :type uri: string
        """
        # XXX: Hack to workaround issue on Mac OS X where volume level
        # does not persist between track changes. mopidy/mopidy#886
        if self.mixer is not None:
            current_volume = self.mixer.get_volume()
        else:
            current_volume = None
        self._tags = {}  # TODO: add test for this somehow
        self._playbin.set_property('uri', uri)
        if self.mixer is not None and current_volume is not None:
            self.mixer.set_volume(current_volume)
    def set_appsrc(
            self, caps, need_data=None, enough_data=None, seek_data=None):
        """
        Switch to using appsrc for getting audio to be played.
        You *MUST* call :meth:`prepare_change` before calling this method.
        :param caps: GStreamer caps string describing the audio format to
            expect
        :type caps: string
        :param need_data: callback for when appsrc needs data
        :type need_data: callable which takes data length hint in ms
        :param enough_data: callback for when appsrc has enough data
        :type enough_data: callable
        :param seek_data: callback for when data from a new position is needed
            to continue playback
        :type seek_data: callable which takes time position in ms
        """
        self._appsrc.prepare(
            gst.Caps(bytes(caps)), need_data, enough_data, seek_data)
        self._playbin.set_property('uri', 'appsrc://')
    def emit_data(self, buffer_):
        """
        Call this to deliver raw audio data to be played.
        If the buffer is :class:`None`, the end-of-stream token is put on the
        playbin. We will get a GStreamer message when the stream playback
        reaches the token, and can then do any end-of-stream related tasks.
        Note that the URI must be set to ``appsrc://`` for this to work.
        Returns :class:`True` if data was delivered.
        :param buffer_: buffer to pass to appsrc
        :type buffer_: :class:`gst.Buffer` or :class:`None`
        :rtype: boolean
        """
        return self._appsrc.push(buffer_)
    def emit_end_of_stream(self):
        """
        Put an end-of-stream token on the playbin. This is typically used in
        combination with :meth:`emit_data`.
        We will get a GStreamer message when the stream playback reaches the
        token, and can then do any end-of-stream related tasks.
        .. deprecated:: 1.0
            Use :meth:`emit_data` with a :class:`None` buffer instead.
        """
        deprecation.warn('audio.emit_end_of_stream')
        self._appsrc.push(None)
    def set_about_to_finish_callback(self, callback):
        """
        Configure audio to use an about-to-finish callback.
        This should be used to achieve gapless playback. For this to work the
        callback *MUST* call :meth:`set_uri` with the new URI to play and
        block until this call has been made. :meth:`prepare_change` is not
        needed before :meth:`set_uri` in this one special case.
        :param callable callback: Callback to run when we need the next URI.
        """
        self._about_to_finish_callback = callback
    def get_position(self):
        """
        Get position in milliseconds.
        :rtype: int
        """
        try:
            gst_position = self._playbin.query_position(gst.FORMAT_TIME)[0]
            return utils.clocktime_to_millisecond(gst_position)
        except gst.QueryError:
            # TODO: take state into account for this and possibly also return
            # None as the unknown value instead of zero?
            logger.debug('Position query failed')
            return 0
    def set_position(self, position):
        """
        Set position in milliseconds.
        :param position: the position in milliseconds
        :type position: int
        :rtype: :class:`True` if successful, else :class:`False`
        """
        # TODO: double check seek flags in use.
        gst_position = utils.millisecond_to_clocktime(position)
        result = self._playbin.seek_simple(
            gst.Format(gst.FORMAT_TIME), gst.SEEK_FLAG_FLUSH, gst_position)
        gst_logger.debug('Sent flushing seek: position=%s', gst_position)
        return result
    def start_playback(self):
        """
        Notify GStreamer that it should start playback.
        :rtype: :class:`True` if successfull, else :class:`False`
        """
        return self._set_state(gst.STATE_PLAYING)
    def pause_playback(self):
        """
        Notify GStreamer that it should pause playback.
        :rtype: :class:`True` if successfull, else :class:`False`
        """
        return self._set_state(gst.STATE_PAUSED)
    def prepare_change(self):
        """
        Notify GStreamer that we are about to change state of playback.
        This function *MUST* be called before changing URIs or doing
        changes like updating data that is being pushed. The reason for this
        is that GStreamer will reset all its state when it changes to
        :attr:`gst.STATE_READY`.
        """
        return self._set_state(gst.STATE_READY)
    def stop_playback(self):
        """
        Notify GStreamer that is should stop playback.
        :rtype: :class:`True` if successfull, else :class:`False`
        """
        self._buffering = False
        return self._set_state(gst.STATE_NULL)
    def wait_for_state_change(self):
        """Block until any pending state changes are complete.
        Should only be used by tests.
        """
        self._playbin.get_state()
    def enable_sync_handler(self):
        """Enable manual processing of messages from bus.
        Should only be used by tests.
        """
        def sync_handler(bus, message):
            self._handler.on_message(bus, message)
            # Drop the message so it is not delivered again asynchronously.
            return gst.BUS_DROP
        bus = self._playbin.get_bus()
        bus.set_sync_handler(sync_handler)
    def _set_state(self, state):
        """
        Internal method for setting the raw GStreamer state.
        .. digraph:: gst_state_transitions
            graph [rankdir="LR"];
            node [fontsize=10];
            "NULL" -> "READY"
            "PAUSED" -> "PLAYING"
            "PAUSED" -> "READY"
            "PLAYING" -> "PAUSED"
            "READY" -> "NULL"
            "READY" -> "PAUSED"
        :param state: State to set playbin to. One of: `gst.STATE_NULL`,
            `gst.STATE_READY`, `gst.STATE_PAUSED` and `gst.STATE_PLAYING`.
        :type state: :class:`gst.State`
        :rtype: :class:`True` if successfull, else :class:`False`
        """
        self._target_state = state
        result = self._playbin.set_state(state)
        gst_logger.debug('State change to %s: result=%s', state.value_name,
                         result.value_name)
        if result == gst.STATE_CHANGE_FAILURE:
            logger.warning(
                'Setting GStreamer state to %s failed', state.value_name)
            return False
        # TODO: at this point we could already emit stopped event instead
        # of faking it in the message handling when result=OK
        return True
    # TODO: bake this into setup appsrc perhaps?
    def set_metadata(self, track):
        """
        Set track metadata for currently playing song.
        Only needs to be called by sources such as `appsrc` which do not
        already inject tags in playbin, e.g. when using :meth:`emit_data` to
        deliver raw audio data to GStreamer.
        :param track: the current track
        :type track: :class:`mopidy.models.Track`
        """
        taglist = gst.TagList()
        artists = [a for a in (track.artists or []) if a.name]
        # Default to blank data to trick shoutcast into clearing any previous
        # values it might have.
        taglist[gst.TAG_ARTIST] = ' '
        taglist[gst.TAG_TITLE] = ' '
        taglist[gst.TAG_ALBUM] = ' '
        if artists:
            taglist[gst.TAG_ARTIST] = ', '.join([a.name for a in artists])
        if track.name:
            taglist[gst.TAG_TITLE] = track.name
        if track.album and track.album.name:
            taglist[gst.TAG_ALBUM] = track.album.name
        event = gst.event_new_tag(taglist)
        # TODO: check if we get this back on our own bus?
        self._playbin.send_event(event)
        gst_logger.debug('Sent tag event: track=%s', track.uri)
    def get_current_tags(self):
        """
        Get the currently playing media's tags.
        If no tags have been found, or nothing is playing this returns an empty
        dictionary. For each set of tags we collect a tags_changed event is
        emitted with the keys of the changes tags. After such calls users may
        call this function to get the updated values.
        :rtype: {key: [values]} dict for the current media.
        """
        # TODO: should this be a (deep) copy? most likely yes
        # TODO: should we return None when stopped?
        # TODO: support only fetching keys we care about?
        return self._tags
| {
"content_hash": "7189efc4e31ab6b8f86081ea0cee6078",
"timestamp": "",
"source": "github",
"line_count": 776,
"max_line_length": 79,
"avg_line_length": 36.123711340206185,
"alnum_prop": 0.6062357305936074,
"repo_name": "rawdlite/mopidy",
"id": "72750bdf6d3d80025ed87715eeb3665799665fbe",
"size": "28032",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "mopidy/audio/actor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "610"
},
{
"name": "Groff",
"bytes": "573"
},
{
"name": "HTML",
"bytes": "805"
},
{
"name": "JavaScript",
"bytes": "82060"
},
{
"name": "Python",
"bytes": "1108001"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
} |
from django.http import Http404
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from pressgang.accounts.decorators import can_manage_blogs
from pressgang.actions import clear_action, retrieve_action
from pressgang.utils.ajax import ajax_view, AjaxError
from pressgang.utils.pages import Page
@can_manage_blogs
def execute_action(request, **kwargs):
    """A page that executes an action.

    For this view to be called, there must be a cached Action-descended
    instance available as a session variable under the 'current_action' key.
    """
    current_action = retrieve_action(request)
    if not current_action:
        raise Http404
    page = Page(request)
    page.add_render_args({
        'action': current_action,
        'blog': current_action.blog
    })
    return page.render(current_action.execute_template)
@can_manage_blogs
@ajax_view
def begin_action(request):
    """Begins executing the current cached action.

    Returns an empty dict on success; raises AjaxError when no action is
    cached or the action cannot be started.
    """
    action = retrieve_action(request)
    if not action:
        raise AjaxError(_("No action could be found in the session."))
    # Execute the action asynchronously
    try:
        action.execute_async()
    # `except Exception, e` is Python-2-only syntax; `as` is valid on
    # Python 2.6+ and required on Python 3.
    except Exception as e:
        raise AjaxError(_("Unable to start the action %(action)s: %(error)s") % {'error': e, 'action': action.display_name})
    return {}
@can_manage_blogs
@ajax_view
def action_progress(request):
    """Fetches an update on the progress of an action.

    This is used to display feedback to the user on the action, as this
    view is called in a loop while the action is being performed.
    """
    action = retrieve_action(request)
    if not action:
        raise AjaxError(_("No action could be found in the session"))
    # Refresh the action's record to avoid using a stale record
    try:
        record = action.Record.objects.get(pk=action.record.pk)
    except action.Record.DoesNotExist:
        raise AjaxError(_("No action record could be found."))
    # Clear the cached action if it has been completed
    if record.is_ended:
        clear_action(request)
    # Generate the markup and pass its length to the client so that it can
    # determine if the contents have changed
    log_markup = render_to_string(action.log_template, {
        'action': action,
        'blog': action.blog,
        'log': record.log,
        'record': record,
        'succeeded': record.succeeded,
        'user': request.user})
    # The client compares this size against the previous poll's value as a
    # cheap change-detection mechanism.
    markup_size = len(log_markup)
    return {
        'ended': record.is_ended,
        'failed': record.is_failed,
        'markup': {'log': log_markup},
        'size': markup_size
    }
| {
"content_hash": "9e1505e0f4410fd7c711ee00339e4ea0",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 118,
"avg_line_length": 28.423529411764704,
"alnum_prop": 0.7330298013245033,
"repo_name": "cilcoberlin/pressgang",
"id": "20731a4fbf21cbd74df45d2320b0e98722ecf723",
"size": "2417",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pressgang/actions/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "8867"
},
{
"name": "PHP",
"bytes": "17602"
},
{
"name": "Python",
"bytes": "190495"
}
],
"symlink_target": ""
} |
import os
import re
import sys
import urllib
import BeautifulSoup
import json
#import simplejson
from models import Song as AppSong
import random
from django.http import HttpResponse
import logging
from django.core import serializers
URLS_LIST = ['http://pigeonsandplanes.com/', 'http://www.2dopeboyz.com/', 'http://agrumpyoldmanwithabeard.blogspot.com/', 'http://www.cocaineblunts.com/blunts/?page_id=1074']
URLS_LIST2 = ['http://3hive.com']
URLS_LIST3 = ['http://fakeshoredrive.com/','http://earmilk.com','http://passionweiss.com','http://creamteam.tv',]
MAX_DEPTH = 6
LOG_FILENAME = '/home/hiphopgoblin/logs/user/debug.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
def song(url, text):
    """Persist *url* as a Song row when it points at an .mp3 file.

    Always returns the ``(url, text)`` pair so callers can keep the
    link regardless of whether the save succeeded.
    """
    if url[-4:] == '.mp3':
        try:
            s = AppSong(url=url, title=text, count=0)
            s.save()
            logging.debug('song saved')
        # Narrowed from a bare ``except:`` so KeyboardInterrupt /
        # SystemExit are no longer swallowed; duplicate rows and DB
        # errors remain best-effort, as before.
        except Exception:
            logging.debug('song found, could not be saved')
    return (url, text)
def site(url, text):
    """Return *url* when it is a plain http page link (not an .mp3)."""
    looks_like_page = url[:7] == 'http://' and url[-4:] != '.mp3'
    if looks_like_page:
        logging.debug(url)
        return url
def strip_tags(string):
    """Replace every markup tag in *string* with a single space."""
    return re.sub(r'<.*?>', ' ', string)
def separate(lst):
    """Split (url, text) pairs into candidate site urls and song pairs."""
    songs = []
    for url, text in lst:
        songs.append(song(url, text))
    sites = []
    for url, text in lst:
        sites.append(site(url, text))
    return (sites, songs)
class Node(object):
    """A page in the crawl tree; fetches links up to MAX_DEPTH deep."""

    def __init__(self, url, depth, top):
        self.url = url
        self.children = []   # child UnderNodes discovered on this page
        self.songs = []      # (url, text) pairs for .mp3 links found here
        self.depth = depth
        self.topNode = top   # root TopNode of this crawl

    def openResources(self):
        """Fetch self.url and return a parsed BeautifulSoup tree, or False."""
        try:
            f = urllib.urlopen(self.url)
            logging.debug('urllib worked')
            return BeautifulSoup.BeautifulSoup(f.read())
        # Narrowed from a bare ``except:``; network/parse failures are
        # still treated as "no page".
        except Exception:
            logging.debug('rsrcs opening fail ')
            return False

    def getUrls(self):
        """Return [(href, text), ...] for every anchor tag on the page."""
        link_list = []
        soup = self.openResources()
        if soup:
            logging.debug('returned a soup obj')
            links = soup.findAll('a')
            for l in links:
                try:
                    link_list.append((l['href'], strip_tags((' ').join(str(t) for t in l.contents))))
                    logging.debug('appended' + l['href'])
                # e.g. anchors without an href attribute
                except Exception:
                    logging.debug('could not append')
        return link_list

    def visit(self, scraper):
        """Crawl this node and, recursively, all of its children."""
        logging.debug('visited')
        if self.depth > MAX_DEPTH:
            return
        urls_list, self.songs = separate(self.getUrls())
        self.children = [UnderNode(parent=self, top=self.topNode, url=x) for x in urls_list]
        self.pushSongs(scraper)
        for x in self.children:
            x.visit(scraper)

    def pushSongs(self, scraper):
        """Append this node's songs to the scraper's accumulator.

        Bug fix: the old list comprehension over ``scraper.songs.append``
        replaced ``scraper.songs`` with a list of ``None`` values
        (``append`` returns None), destroying every previously
        collected song.
        """
        scraper.songs.extend(self.songs)
class TopNode(Node):
    """Root of a crawl tree: depth 0 and its own top node."""

    def __init__(self, url):
        Node.__init__(self, url, 0, self)
class UnderNode(Node):
    """A page reached from a parent node; one level deeper than its parent."""

    def __init__(self, parent, top, url):
        super(UnderNode, self).__init__(url, parent.depth + 1, top)
        self.topNode = top  # redundant: the base __init__ already sets this

    def pushSongs(self, scraper):
        # Bug fix: extend the shared accumulator instead of replacing it
        # with a list of None (the return value of list.append).
        scraper.songs.extend(self.songs)
class Scraper:
    """Drives a crawl over a list of seed urls and accumulates songs."""

    def __init__(self, urls):
        self.songs = []
        self.topNodes = []
        for seed in urls:
            self.topNodes.append(TopNode(seed))

    def collectSongs(self):
        """Gather the per-root song lists into self.songs and return it."""
        gathered = []
        for root in self.topNodes:
            gathered.append(root.songs)
        self.songs = gathered
        return self.songs
class Song:
    """Lightweight in-memory (url, text) pair for a discovered track."""

    def __init__(self, url, text):
        self.url = url
        self.text = text

    def __unicode__(self):
        return ' --- '.join((self.url, self.text))
def scrape(songid=None):
    """Return a JSON payload describing one song.

    When *songid* is given that exact song is fetched; otherwise a
    random row is picked from the Song table.
    """
    if songid:
        s = AppSong.objects.get(id=songid)
    else:
        # count() lets the database count rows instead of materializing
        # the whole queryset just to take its len().
        count = AppSong.objects.count()
        index = random.randint(1, count)
        s = AppSong.objects.all()[index - 1]
    # Bug fix: the stdlib ``json`` module has no ``write`` function
    # (that was the API of the old python-json package); ``dumps``
    # produces the serialized string.
    return json.dumps({"filename": s.url, "title": s.title, "count": s.count, "id": s.id})
def main():
    """Crawl every seed url in URLS_LIST, accumulating songs on the scraper."""
    crawler = Scraper(URLS_LIST)
    for root in crawler.topNodes:
        root.visit(crawler)
def getsongs(request):
    """Django view: crawl URLS_LIST3 for songs and return an empty response."""
    logging.debug('called getsongs')
    crawler = Scraper(URLS_LIST3)
    for root in crawler.topNodes:
        root.visit(crawler)
    return HttpResponse()
| {
"content_hash": "b028f9ab28267413edf96bd56b7c4c04",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 174,
"avg_line_length": 25.68211920529801,
"alnum_prop": 0.6743166580711707,
"repo_name": "zackster/HipHopGoblin",
"id": "c470aef539d90cdccb2922677f7153ffe3bc35c4",
"size": "3878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hhg/hhg_app/scraper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "83166"
},
{
"name": "JavaScript",
"bytes": "812261"
},
{
"name": "Python",
"bytes": "4377875"
},
{
"name": "Shell",
"bytes": "328"
}
],
"symlink_target": ""
} |
# Aggregated test entry point for the IECoreNuke bindings.
import unittest
import IECore
import sys

# Make the individual test modules importable as top-level names.
sys.path.append( "test/IECoreNuke" )

from KnobAccessorsTest import *
from FnAxisTest import *
from DeepImageReaderTest import *
from LensDistortTest import *
from StringUtilTest import *
from KnobConvertersTest import *

from ParameterisedHolderTest import ParameterisedHolderTest
from ObjectKnobTest import ObjectKnobTest
from OpHolderTest import OpHolderTest
from SceneCacheReaderTest import SceneCacheReaderTest

# PNG support is optional at build time; only pull in its tests when
# the library reports it was built with PNG enabled.
if IECore.withPNG() :
    from PNGReaderTest import PNGReaderTest

# Discover and run everything, teeing the textual results to stderr
# and to a results file.
unittest.TestProgram(
    testRunner = unittest.TextTestRunner(
        stream = IECore.CompoundStream(
            [
                sys.stderr,
                open( "test/IECoreNuke/resultsPython.txt", "w" )
            ]
        ),
        verbosity = 2
    )
)
| {
"content_hash": "9123fa37913373a962636c18d4f73e20",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 59,
"avg_line_length": 23.29032258064516,
"alnum_prop": 0.796398891966759,
"repo_name": "hradec/cortex",
"id": "64d049fa890c8e59b4c5b27d24c111efee14b79d",
"size": "2511",
"binary": false,
"copies": "3",
"ref": "refs/heads/testing",
"path": "test/IECoreNuke/All.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "70350"
},
{
"name": "C++",
"bytes": "11602345"
},
{
"name": "CMake",
"bytes": "14161"
},
{
"name": "GLSL",
"bytes": "31098"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "21989"
},
{
"name": "Python",
"bytes": "5076729"
},
{
"name": "Slash",
"bytes": "8583"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
} |
# Regression test for the legacy ``socket.ssl`` support (Python 2 era).
from test import test_support
import socket
import time

# Optionally test SSL support. This requires the 'network' resource as given
# on the regrtest command line; skip when it is absent or the socket
# module was built without ssl.
skip_expected = not (test_support.is_resource_enabled('network') and
                     hasattr(socket, "ssl"))
def test_basic():
    # Exercises the RAND_* entry points and performs a real HTTPS
    # fetch; requires the 'network' regrtest resource.
    test_support.requires('network')
    import urllib
    socket.RAND_status()
    try:
        # RAND_egd expects a path string; an int argument must raise.
        socket.RAND_egd(1)
    except TypeError:
        pass
    else:
        print "didn't raise TypeError"
    socket.RAND_add("this is a random string", 75.0)
    # Fetch an https page end-to-end to prove the ssl transport works.
    f = urllib.urlopen('https://sf.net')
    buf = f.read()
    f.close()
def test_rude_shutdown():
    # Connecting socket.ssl to a peer that closes immediately must
    # raise socket.sslerror rather than succeed.
    # This test deadlocks, see http://bugs.jython.org/issue1049
    if test_support.is_jython:
        return
    try:
        import thread
    except ImportError:
        return

    # some random port to connect to
    PORT = 9934

    def listener():
        # Accept a single connection, then drop it straight away.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', PORT))
        s.listen(5)
        s.accept()
        del s
        thread.exit()

    def connector():
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('localhost', PORT))
        try:
            # The listener has already hung up, so the handshake fails.
            ssl_sock = socket.ssl(s)
        except socket.sslerror:
            pass
        else:
            raise test_support.TestFailed, \
                  'connecting to closed SSL socket failed'

    thread.start_new_thread(listener, ())
    # Give the listener thread a moment to bind before connecting.
    time.sleep(1)
    connector()
def test_main():
    # regrtest entry point: skip cleanly when ssl support is missing.
    if not hasattr(socket, "ssl"):
        raise test_support.TestSkipped("socket module has no ssl support")
    test_rude_shutdown()
    test_basic()

# Allow running the module directly as a script.
if __name__ == "__main__":
    test_main()
| {
"content_hash": "181488128c900cbc3e8f19288483eeed",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 77,
"avg_line_length": 24.81159420289855,
"alnum_prop": 0.5922897196261683,
"repo_name": "mosbasik/buzhug",
"id": "194c395940791b8be8ccee37b38834d542a486f9",
"size": "1790",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "javasrc/lib/Jython/Lib/test/test_socket_ssl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1391"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "595726"
},
{
"name": "Java",
"bytes": "475421"
},
{
"name": "Makefile",
"bytes": "1224"
},
{
"name": "Python",
"bytes": "10675193"
},
{
"name": "R",
"bytes": "752"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
# Packaging script for Flat, a generative-graphics library.
from distutils.core import setup

setup(
    name = 'Flat',
    description = 'Generative infrastructure for Python',
    version = '0.3.2',
    packages = ['flat'],
    author = 'Juraj Sukop',
    author_email = 'contact@xxyxyz.org',
    url = 'https://xxyxyz.org/flat',
    license = 'MIT',
    # Trove classifiers; the full list lives at https://pypi.org/classifiers/
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Other Audience',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Artistic Software',
        'Topic :: Multimedia :: Graphics',
        'Topic :: Multimedia :: Graphics :: 3D Modeling',
        'Topic :: Multimedia :: Graphics :: 3D Rendering',
        'Topic :: Multimedia :: Graphics :: Editors',
        'Topic :: Multimedia :: Graphics :: Editors :: Raster-Based',
        'Topic :: Multimedia :: Graphics :: Editors :: Vector-Based',
        'Topic :: Multimedia :: Graphics :: Graphics Conversion',
        'Topic :: Multimedia :: Graphics :: Viewers',
        'Topic :: Scientific/Engineering :: Visualization',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing',
        'Topic :: Text Processing :: Fonts'])
"content_hash": "17513cae3f1fd8f69e48268c41187f59",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 71,
"avg_line_length": 44.55882352941177,
"alnum_prop": 0.594059405940594,
"repo_name": "xxyxyz/flat",
"id": "eece443d754a2c2ca0c1214fc7eb5f305b903784",
"size": "1515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223145"
}
],
"symlink_target": ""
} |
import shutil
import os
import unittest
from q2_types.feature_table import (BIOMV100Format, BIOMV210Format,
BIOMV100DirFmt, BIOMV210DirFmt)
from qiime2.plugin.testing import TestPluginBase
class TestFormats(TestPluginBase):
    """Validation tests for the BIOM v1.0.0 / v2.1.0 file and dir formats."""

    package = 'q2_types.feature_table.tests'

    def test_biomv100_format_validate_positive(self):
        """A v1.0.0 table passes BIOMV100Format validation."""
        filepath = self.get_data_path('feature-table_v100.biom')
        # ``fmt`` rather than ``format``: avoid shadowing the builtin.
        fmt = BIOMV100Format(filepath, mode='r')
        fmt.validate()

    def test_biomv100_format_validate_negative(self):
        """A v2.1.0 table is rejected by BIOMV100Format."""
        filepath = self.get_data_path('feature-table_v210.biom')
        fmt = BIOMV100Format(filepath, mode='r')
        with self.assertRaisesRegex(ValueError, 'BIOMV100Format'):
            fmt.validate()

    def test_biomv210_format_validate_positive(self):
        """A v2.1.0 table passes BIOMV210Format validation."""
        filepath = self.get_data_path('feature-table_v210.biom')
        fmt = BIOMV210Format(filepath, mode='r')
        fmt.validate()

    def test_biomv210_format_validate_negative(self):
        """A v1.0.0 table is rejected by BIOMV210Format."""
        filepath = self.get_data_path('feature-table_v100.biom')
        fmt = BIOMV210Format(filepath, mode='r')
        with self.assertRaisesRegex(ValueError, 'BIOMV210Format'):
            fmt.validate()

    def test_biomv100_dir_format_validate_positive(self):
        """A directory holding a v1.0.0 table validates as BIOMV100DirFmt."""
        filepath = self.get_data_path('feature-table_v100.biom')
        shutil.copy(filepath,
                    os.path.join(self.temp_dir.name, 'feature-table.biom'))
        fmt = BIOMV100DirFmt(self.temp_dir.name, mode='r')
        fmt.validate()

    def test_biomv210_dir_format_validate_positive(self):
        """A directory holding a v2.1.0 table validates as BIOMV210DirFmt."""
        filepath = self.get_data_path('feature-table_v210.biom')
        shutil.copy(filepath,
                    os.path.join(self.temp_dir.name, 'feature-table.biom'))
        fmt = BIOMV210DirFmt(self.temp_dir.name, mode='r')
        fmt.validate()
# Allow running this test module directly: ``python test_format.py``.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "0735c99ad332377885145ae163ff4eaf",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 75,
"avg_line_length": 33.91228070175438,
"alnum_prop": 0.6554578375581996,
"repo_name": "jairideout/q2-types",
"id": "6435563f9ab1b6a12ad79dd7541257722ee079e6",
"size": "2283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "q2_types/feature_table/tests/test_format.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "225158"
}
],
"symlink_target": ""
} |
"""Tools for managing evaluation contexts. """
from sympy.utilities.iterables import dict_merge
from sympy.polys.polyutils import PicklableWithSlots
# Option names a Context may carry; unknown names fall back to normal
# attribute lookup in Context.__getattribute__.
__known_options__ = set(['frac', 'gens', 'wrt', 'sort', 'order', 'domain',
    'modulus', 'gaussian', 'extension', 'field', 'greedy', 'symmetric'])

__global_options__ = []

# Source template used to generate one factory function per known
# option, e.g. ``def frac(_frac): return Context(frac=_frac)``.
__template__ = """\
def %(option)s(_%(option)s):
    return Context(%(option)s=_%(option)s)
"""

# Python 2 ``exec`` statement: materialize one factory per option at
# module import time.
for option in __known_options__:
    exec __template__ % { 'option': option }
class Context(PicklableWithSlots):
    """Bag of evaluation options with attribute-style access.

    Known option names (see ``__known_options__``) resolve to their
    stored value, or None when unset; two contexts combine with ``&``.
    """

    __slots__ = ['__options__']

    def __init__(self, dict=None, **options):
        # NOTE: ``dict`` shadows the builtin; kept for API compatibility.
        if dict is not None:
            self.__options__ = dict_merge(dict, options)
        else:
            self.__options__ = options

    def __getattribute__(self, name):
        # Intercept known option names so ``ctx.domain`` returns the
        # stored value or None; everything else is normal lookup.
        if name in __known_options__:
            try:
                return object.__getattribute__(self, '__options__')[name]
            except KeyError:
                return None
        else:
            return object.__getattribute__(self, name)

    def __str__(self):
        # Python 2 ``iteritems``; do not port this line blindly.
        return 'Context(%s)' % ', '.join(
            [ '%s=%r' % (key, value) for key, value in self.__options__.iteritems() ])

    def __and__(self, other):
        # ``ctx1 & ctx2`` merges option dicts via dict_merge (merge
        # precedence is whatever dict_merge implements -- see polyutils).
        if isinstance(other, Context):
            return Context(**dict_merge(self.__options__, other.__options__))
        else:
            raise TypeError("a context manager expected, got %s" % other)

    def __enter__(self):
        # Using a Context as a ``with``-statement manager is reserved
        # for a future global-context feature.
        raise NotImplementedError('global context')

    def __exit__(self, exc_type, exc_val, exc_tb):
        raise NotImplementedError('global context')
def register_context(func):
    """Expose *func* as a Context method, merging stored options into kwargs."""
    def wrapper(self, *args, **kwargs):
        merged = dict_merge(self.__options__, kwargs)
        return func(*args, **merged)

    # Preserve the wrapped function's public identity.
    wrapper.__doc__ = func.__doc__
    wrapper.__name__ = func.__name__

    setattr(Context, func.__name__, wrapper)
    return func
| {
"content_hash": "286c2c63123e551aae2ae717653b8fed",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 86,
"avg_line_length": 29.138461538461538,
"alnum_prop": 0.5712777191129884,
"repo_name": "amitjamadagni/sympy",
"id": "81030e0904e8ac13595c3316bdf4acd7d9dc3527",
"size": "1894",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/polys/polycontext.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12199014"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "287"
},
{
"name": "TeX",
"bytes": "8789"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
"""Fetch a git repository and restart the webapp service."""
import os, sys
__version__ = None
if __name__ == '__main__':
webapp_path = sys.argv[1]
os.chdir(webapp_path)
python_executable = os.path.join(
os.path.dirname(webapp_path), 'bin', 'python')
cmd = [ python_executable, './manage.py', 'pullapp' ]
sys.stderr.write('run: %s' % ' '.join(cmd))
sys.exit(os.system(' '.join(cmd)))
| {
"content_hash": "610950744d939f691590958c5ee42dfc",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 60,
"avg_line_length": 29.928571428571427,
"alnum_prop": 0.5990453460620525,
"repo_name": "djaodjin/djaodjin-deployutils",
"id": "efcf1024101f0cef74c5a585c5e105aee5065292",
"size": "1785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dpull.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "801"
},
{
"name": "HTML",
"bytes": "3530"
},
{
"name": "Makefile",
"bytes": "2702"
},
{
"name": "Python",
"bytes": "239749"
},
{
"name": "Shell",
"bytes": "2523"
}
],
"symlink_target": ""
} |
# Example: fetch a single Twilio Notify binding and print its SID.
import os
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account = os.environ['TWILIO_ACCOUNT_SID']
token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account, token)

# "IS..." is the Notify service SID placeholder, "BS..." the binding SID.
binding = client.notify.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .bindings("BSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

print(binding.sid)
| {
"content_hash": "223407b75beb0c3d68c382426d81e12b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 72,
"avg_line_length": 33.61538461538461,
"alnum_prop": 0.7780320366132724,
"repo_name": "TwilioDevEd/api-snippets",
"id": "561a140d20540f0885990599baf3a399844191cb",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifications/rest/bindings/retrieve-binding/retrieve-binding.7.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
} |
# Packaging script for sysenv.
import os
import sys

# Prefer setuptools when available; distutils is the fallback.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Shortcut: ``python setup.py publish`` uploads a source dist to PyPI.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()
# Long description is assembled from the README plus the changelog.
# The files are read via context managers so the handles are closed
# promptly instead of leaking until garbage collection (the old bare
# ``open(...).read()`` pattern).
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

setup(
    name='sysenv',
    version='0.1.0',
    description='Simple handling of system environment variables for application deployment.',
    long_description=readme + '\n\n' + history,
    author='Ben Lopatin',
    author_email='ben@wellfire.co',
    url='https://github.com/bennylope/sysenv',
    packages=[
        'sysenv',
    ],
    package_dir={'sysenv': 'sysenv'},
    include_package_data=True,
    install_requires=[
    ],
    license="BSD",
    zip_safe=False,
    keywords='sysenv',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
    test_suite='tests',
)
| {
"content_hash": "8647a5375ea4da4b884399e0f01fce74",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 94,
"avg_line_length": 27.340425531914892,
"alnum_prop": 0.6093385214007782,
"repo_name": "bennylope/sysenv",
"id": "03d74d23931247ef1a9da72ca0aa4608f1a4d9a4",
"size": "1332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17017"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
from swgpy.object import *

def create(kernel):
    """Build the shared sharnaff bull creature template object."""
    creature = Creature()
    creature.template = "object/mobile/shared_sharnaff_bull.iff"
    creature.attribute_template_id = 9
    creature.stfName("monster_name","sharnaff")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return creature
"content_hash": "415bf6889e00a81708a23d08dabff614",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 22.307692307692307,
"alnum_prop": 0.6931034482758621,
"repo_name": "anhstudios/swganh",
"id": "14ac5c66428d464975876d3633162af84910d8c5",
"size": "435",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_sharnaff_bull.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.conf import settings
from django.core.urlresolvers import reverse
from onadata.apps.main.views import delete_data
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.apps.logger.models.instance import Instance
from onadata.libs.utils import common_tags
from test_base import TestBase
class TestFormAPIDelete(TestBase):
    """Integration tests for the ``delete_data`` view.

    Deletion is a soft delete: ``Instance.deleted_at`` is set and the
    corresponding mongo record is flagged, not removed.
    """

    def setUp(self):
        TestBase.setUp(self)
        self._create_user_and_login()
        self._publish_transportation_form_and_submit_instance()
        self.delete_url = reverse(delete_data, kwargs={
            'username': self.user.username,
            'id_string': self.xform.id_string
        })
        # Default query arguments for ParsedInstance.query_mongo.
        self.mongo_args = {
            'username': self.user.username, 'id_string': self.xform.id_string,
            'query': "{}", 'limit': 1,
            'sort': '{"_id":-1}', 'fields': '["_id","_uuid"]'}

    def _get_data(self):
        # Materialize the mongo cursor into a plain list of records.
        cursor = ParsedInstance.query_mongo(**self.mongo_args)
        records = list(record for record in cursor)
        return records

    def test_get_request_does_not_delete(self):
        # GET on the delete endpoint is not allowed (405) and must
        # leave the live-instance count untouched.
        count = Instance.objects.filter(deleted_at=None).count()
        response = self.anon.get(self.delete_url)
        self.assertEqual(response.status_code, 405)
        self.assertEqual(
            Instance.objects.filter(deleted_at=None).count(), count)

    def test_anon_user_cant_delete(self):
        # Only authenticated users are allowed to access the url;
        # anonymous users are redirected to the login page.
        count = Instance.objects.filter(deleted_at=None).count()
        instance = Instance.objects.filter(
            xform=self.xform).latest('date_created')
        # delete
        params = {'id': instance.id}
        response = self.anon.post(self.delete_url, params)
        self.assertEqual(response.status_code, 302)
        self.assertIn("accounts/login/?next=", response["Location"])
        self.assertEqual(
            Instance.objects.filter(deleted_at=None).count(), count)

    def test_delete_shared(self):
        # A different logged-in user must not be able to delete data
        # from a merely *shared* form (403).
        self.xform.shared = True
        self.xform.save()
        self._create_user_and_login("jo")
        count = Instance.objects.filter(deleted_at=None).count()
        instance = Instance.objects.filter(
            xform=self.xform).latest('date_created')
        # delete
        params = {'id': instance.id}
        response = self.client.post(self.delete_url, params)
        self.assertEqual(response.status_code, 403)
        self.assertEqual(
            Instance.objects.filter(deleted_at=None).count(), count)

    def test_owner_can_delete(self):
        # The form owner may delete; the record is soft-deleted
        # (deleted_at set) and disappears from mongo query results.
        count = Instance.objects.filter(deleted_at=None).count()
        instance = Instance.objects.filter(
            xform=self.xform).latest('date_created')
        self.assertEqual(instance.deleted_at, None)
        # delete
        params = {'id': instance.id}
        response = self.client.post(self.delete_url, params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            Instance.objects.filter(deleted_at=None).count(), count - 1)
        instance = Instance.objects.get(id=instance.id)
        self.assertTrue(isinstance(instance.deleted_at, datetime))
        self.assertNotEqual(instance.deleted_at, None)
        query = '{"_id": %s}' % instance.id
        self.mongo_args.update({"query": query})
        # check that query_mongo will not return the deleted record
        after = ParsedInstance.query_mongo(**self.mongo_args)
        self.assertEqual(len(after), count - 1)

    def test_delete_updates_mongo(self):
        # Deletion must also mark the mirrored mongo document.
        count = Instance.objects.filter(
            xform=self.xform, deleted_at=None).count()
        instance = Instance.objects.filter(
            xform=self.xform).latest('date_created')
        # delete
        params = {'id': instance.id}
        response = self.client.post(self.delete_url, params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            Instance.objects.filter(
                xform=self.xform, deleted_at=None).count(), count - 1)
        # check that instance's deleted_at is set
        instance = Instance.objects.get(id=instance.id)
        self.assertTrue(isinstance(instance.deleted_at, datetime))
        # check mongo record was marked as deleted
        cursor = settings.MONGO_DB.instances.find(
            {common_tags.ID: instance.id})
        self.assertEqual(cursor.count(), 1)
        record = cursor.next()
        self.assertIsNotNone(record[common_tags.DELETEDAT])
| {
"content_hash": "ce22ea8fe0c96ec648ae23ec985462a8",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 78,
"avg_line_length": 42.22522522522522,
"alnum_prop": 0.6396415617665885,
"repo_name": "eHealthAfrica/onadata",
"id": "3f3263072861963cedbc57f8c5dfc772299e8d00",
"size": "4687",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "onadata/apps/main/tests/test_form_api_delete.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "96112"
},
{
"name": "JavaScript",
"bytes": "996503"
},
{
"name": "Python",
"bytes": "1841525"
},
{
"name": "Shell",
"bytes": "6107"
}
],
"symlink_target": ""
} |
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.serializers import (
get_artists,
get_albums,
get_tracks,
get_non_default_ids,
)
from tests.support.views import ArtistViewSet, AlbumViewSet, NonDefaultIdViewSet
@mark_urls
def test_detail_attributes(factory: APIRequestFactory) -> None:
    """You can update primary data attributes."""
    document = {
        "data": {
            "id": "1",
            "type": "artist",
            "attributes": {"firstName": "Art", "lastName": "Blakey"},
        }
    }
    update_view = ArtistViewSet.as_view({"put": "update"})
    request = factory.put(reverse("artist-detail", kwargs={"pk": 1}), document)
    response = update_view(request, pk=1)
    response.render()

    assert response["Content-Type"] == "application/vnd.api+json"
    expected = {
        "data": {
            "id": "1",
            "type": "artist",
            "attributes": {"firstName": "Art", "lastName": "Blakey"},
        }
    }
    assert json.loads(response.content) == expected
    # The update is persisted on the stored artist.
    artist = get_artists().get(1)
    assert (artist.id, artist.first_name, artist.last_name) == (1, "Art", "Blakey")
@mark_urls
def test_list_attributes(factory: APIRequestFactory) -> None:
    """You can create using primary data attributes."""
    document = {
        "data": {
            "type": "artist",
            "attributes": {"firstName": "Thelonious", "lastName": "Monk"},
        }
    }
    create_view = ArtistViewSet.as_view({"post": "create"})
    response = create_view(factory.post(reverse("artist-list"), document))
    response.render()

    assert response["Content-Type"] == "application/vnd.api+json"
    expected = {
        "data": {
            "id": "6",
            "type": "artist",
            "attributes": {"firstName": "Thelonious", "lastName": "Monk"},
        }
    }
    assert json.loads(response.content) == expected
    # The new row is persisted under the next available id.
    artist = get_artists().get(6)
    assert (artist.id, artist.first_name, artist.last_name) == (6, "Thelonious", "Monk")
@mark_urls
def test_parse_relationships(factory: APIRequestFactory) -> None:
    """You can parse relationships."""
    create_view = AlbumViewSet.as_view({"post": "create"})
    artist = get_artists().get(0)
    track = get_tracks().get(0)
    document = {
        "data": {
            "type": "album",
            "attributes": {"albumName": "On the Corner"},
            "relationships": {
                "artist": {"data": {"id": artist.id, "type": "artist"}},
                "tracks": {"data": [{"id": track.id, "type": "track"}]},
            },
        }
    }
    response = create_view(factory.post(reverse("album-list"), document))
    response.render()

    assert response["Content-Type"] == "application/vnd.api+json"
    # Relationship ids are echoed back as strings in the response body.
    assert json.loads(response.content) == {
        "data": {
            "id": "4",
            "type": "album",
            "attributes": {"albumName": "On the Corner"},
            "relationships": {
                "artist": {"data": {"id": str(artist.id), "type": "artist"}},
                "tracks": {"data": [{"id": "0", "type": "track"}]},
            },
        }
    }
    created = get_albums()[4]
    assert created.album_name == "On the Corner"
    assert created.artist.id == artist.id
@mark_urls
def test_post_non_default_id(factory: APIRequestFactory) -> None:
    """POSTing with a non-default ID works."""
    document = {
        "data": {
            "id": "foo",
            "type": "non-defaults",
            "attributes": {"name": "my name"},
        }
    }
    create_view = NonDefaultIdViewSet.as_view({"post": "create"})
    response = create_view(factory.post(reverse("non-default-id-list"), document))

    assert response.status_code == 201
    # The client-supplied id is honoured rather than a generated one.
    stored = get_non_default_ids()
    assert stored[0].non_default_id == "foo"
| {
"content_hash": "f772669dec77867a3a248c0eea232e4e",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 80,
"avg_line_length": 31.26153846153846,
"alnum_prop": 0.5346948818897638,
"repo_name": "paulcwatts/drf-json-schema",
"id": "81da30f9000dde334fa00d216be3f70e00a10768",
"size": "4064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100673"
}
],
"symlink_target": ""
} |
"""Clustering v1 profile action implementations"""
import logging
import sys
from openstack import exceptions as sdk_exc
from osc_lib.command import command
from osc_lib import exceptions as exc
from osc_lib import utils
from senlinclient.common.i18n import _
from senlinclient.common import utils as senlin_utils
class ShowProfile(command.ShowOne):
    """Show profile details."""

    log = logging.getLogger(__name__ + ".ShowProfile")

    def get_parser(self, prog_name):
        # Single positional argument: the profile name or ID.
        parser = super(ShowProfile, self).get_parser(prog_name)
        parser.add_argument(
            'profile',
            metavar='<profile>',
            help='Name or ID of profile to show',
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        clustering = self.app.client_manager.clustering
        return _show_profile(clustering, profile_id=parsed_args.profile)
def _show_profile(senlin_client, profile_id):
    """Fetch one profile and return (columns, values) for display."""
    try:
        profile = senlin_client.get_profile(profile_id)
    except sdk_exc.ResourceNotFound:
        raise exc.CommandError('Profile not found: %s' % profile_id)

    # Pretty-print the structured fields.
    formatters = {
        'metadata': senlin_utils.json_formatter,
        'spec': senlin_utils.nested_dict_formatter(
            ['type', 'version', 'properties'],
            ['property', 'value']),
    }

    record = profile.to_dict()
    columns = sorted(record.keys())
    return columns, utils.get_dict_properties(record, columns,
                                              formatters=formatters)
class ListProfile(command.Lister):
    """List profiles that meet the criteria."""

    log = logging.getLogger(__name__ + ".ListProfile")

    def get_parser(self, prog_name):
        parser = super(ListProfile, self).get_parser(prog_name)
        parser.add_argument(
            '--limit',
            metavar='<limit>',
            help=_('Limit the number of profiles returned')
        )
        parser.add_argument(
            '--marker',
            metavar='<id>',
            help=_('Only return profiles that appear after the given profile '
                   'ID')
        )
        parser.add_argument(
            '--sort',
            metavar='<key>[:<direction>]',
            help=_("Sorting option which is a string containing a list of keys"
                   " separated by commas. Each key can be optionally appended "
                   "by a sort direction (:asc or :desc). The valid sort_keys "
                   "are:['type', 'name', 'created_at', 'updated_at']")
        )
        parser.add_argument(
            '--filters',
            metavar='<"key1=value1;key2=value2...">',
            help=_("Filter parameters to apply on returned profiles. "
                   "This can be specified multiple times, or once with "
                   "parameters separated by a semicolon. The valid filter "
                   "keys are: ['type', 'name']"),
            action='append'
        )
        parser.add_argument(
            '--global-project',
            default=False,
            action="store_true",
            help=_('Indicate that the list should include profiles from'
                   ' all projects. This option is subject to access policy '
                   'checking. Default is False')
        )
        parser.add_argument(
            '--full-id',
            default=False,
            action="store_true",
            help=_('Print full IDs in list')
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        senlin_client = self.app.client_manager.clustering
        columns = ['id', 'name', 'type', 'created_at']
        queries = {
            'limit': parsed_args.limit,
            'marker': parsed_args.marker,
            'sort': parsed_args.sort,
            'global_project': parsed_args.global_project,
        }
        # ``--filters`` entries arrive as "k1=v1;k2=v2" strings; flatten
        # them into the query dict.
        if parsed_args.filters:
            queries.update(senlin_utils.format_parameters(parsed_args.filters))
        data = senlin_client.profiles(**queries)
        formatters = {}
        if parsed_args.global_project:
            columns.append('project_id')
        # Abbreviate ids to their first 8 characters unless --full-id.
        if not parsed_args.full_id:
            formatters = {
                'id': lambda x: x[:8],
            }
            if 'project_id' in columns:
                formatters['project_id'] = lambda x: x[:8]
        return (
            columns,
            (utils.get_item_properties(p, columns, formatters=formatters)
             for p in data)
        )
class DeleteProfile(command.Command):
    """Delete profile(s)."""

    log = logging.getLogger(__name__ + ".DeleteProfile")

    def get_parser(self, prog_name):
        parser = super(DeleteProfile, self).get_parser(prog_name)
        parser.add_argument(
            'profile',
            metavar='<profile>',
            nargs='+',
            help=_('Name or ID of profile(s) to delete')
        )
        parser.add_argument(
            '--force',
            action='store_true',
            help=_('Skip yes/no prompt (assume yes)')
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        senlin_client = self.app.client_manager.clustering
        # Interactive confirmation unless --force was given or stdin is
        # not a terminal (e.g. scripted usage).
        try:
            if not parsed_args.force and sys.stdin.isatty():
                sys.stdout.write(
                    _("Are you sure you want to delete this profile(s)"
                      " [y/N]?"))
                prompt_response = sys.stdin.readline().lower()
                if not prompt_response.startswith('y'):
                    return
        except KeyboardInterrupt:  # Ctrl-c
            self.log.info('Ctrl-c detected.')
            return
        except EOFError:  # Ctrl-d
            self.log.info('Ctrl-d detected')
            return

        # Delete each profile individually, counting (and reporting)
        # failures instead of aborting on the first one.
        failure_count = 0
        for pid in parsed_args.profile:
            try:
                senlin_client.delete_profile(pid, False)
            except Exception as ex:
                failure_count += 1
                print(ex)
        if failure_count:
            raise exc.CommandError(_('Failed to delete %(count)s of the '
                                     '%(total)s specified profile(s).') %
                                   {'count': failure_count,
                                    'total': len(parsed_args.profile)})
        print('Profile deleted: %s' % parsed_args.profile)
class CreateProfile(command.ShowOne):
    """Create a profile."""

    log = logging.getLogger(__name__ + ".CreateProfile")

    def get_parser(self, prog_name):
        """Build the argument parser: --metadata, required --spec-file, name."""
        parser = super(CreateProfile, self).get_parser(prog_name)
        parser.add_argument(
            '--metadata',
            metavar='<"key1=value1;key2=value2...">',
            help=_('Metadata values to be attached to the profile. '
                   'This can be specified multiple times, or once with '
                   'key-value pairs separated by a semicolon'),
            action='append'
        )
        parser.add_argument(
            '--spec-file',
            metavar='<spec-file>',
            required=True,
            help=_('The spec file used to create the profile')
        )
        parser.add_argument(
            'name',
            metavar='<profile-name>',
            help=_('Name of the profile to create')
        )
        return parser

    def take_action(self, parsed_args):
        """Validate the spec file and create the profile, then show it."""
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.clustering

        spec = senlin_utils.get_spec_content(parsed_args.spec_file)

        # A usable spec must carry all three top-level keys.
        if spec.get('type', None) is None:
            raise exc.CommandError(_("Missing 'type' key in spec file."))
        if spec.get('version', None) is None:
            raise exc.CommandError(_("Missing 'version' key in spec file."))
        if spec.get('properties', None) is None:
            raise exc.CommandError(_("Missing 'properties' key in spec file."))

        # Heat stack specs carry nested template/file references that must
        # be resolved before the spec is sent to the server.
        if spec.get('type') == 'os.heat.stack':
            spec['properties'] = senlin_utils.process_stack_spec(
                spec['properties'])

        profile = client.create_profile(
            name=parsed_args.name,
            spec=spec,
            metadata=senlin_utils.format_parameters(parsed_args.metadata),
        )
        return _show_profile(client, profile_id=profile.id)
class UpdateProfile(command.ShowOne):
    """Update a profile."""

    log = logging.getLogger(__name__ + ".UpdateProfile")

    def get_parser(self, prog_name):
        """Build the argument parser: optional --name/--metadata plus profile."""
        parser = super(UpdateProfile, self).get_parser(prog_name)
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('The new name for the profile')
        )
        parser.add_argument(
            '--metadata',
            metavar='<"key1=value1;key2=value2...">',
            help=_("Metadata values to be attached to the profile. "
                   "This can be specified multiple times, or once with "
                   "key-value pairs separated by a semicolon. Use '{}' "
                   "can clean metadata "),
            action='append'
        )
        parser.add_argument(
            'profile',
            metavar='<profile>',
            help=_('Name or ID of the profile to update')
        )
        return parser

    def take_action(self, parsed_args):
        """Apply the requested changes to an existing profile and show it."""
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.clustering

        updates = {'name': parsed_args.name}
        if parsed_args.metadata:
            updates['metadata'] = senlin_utils.format_parameters(
                parsed_args.metadata)

        # The update call needs the profile's ID, so resolve name/ID first.
        profile = client.find_profile(parsed_args.profile)
        if profile is None:
            raise exc.CommandError(_('Profile not found: %s') %
                                   parsed_args.profile)
        client.update_profile(profile.id, **updates)
        return _show_profile(client, profile_id=profile.id)
class ValidateProfile(command.ShowOne):
    """Validate a profile."""

    log = logging.getLogger(__name__ + ".ValidateProfile")

    def get_parser(self, prog_name):
        """Build the argument parser: only the required --spec-file option."""
        parser = super(ValidateProfile, self).get_parser(prog_name)
        parser.add_argument(
            '--spec-file',
            metavar='<spec-file>',
            required=True,
            help=_('The spec file of the profile to be validated')
        )
        return parser

    def take_action(self, parsed_args):
        """Send the spec to the server for validation and show the result."""
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.clustering

        spec = senlin_utils.get_spec_content(parsed_args.spec_file)

        # A usable spec must carry all three top-level keys.
        if spec.get('type', None) is None:
            raise exc.CommandError(_("Missing 'type' key in spec file."))
        if spec.get('version', None) is None:
            raise exc.CommandError(_("Missing 'version' key in spec file."))
        if spec.get('properties', None) is None:
            raise exc.CommandError(_("Missing 'properties' key in spec file."))

        # Heat stack specs carry nested template/file references that must
        # be resolved before the spec is sent to the server.
        if spec.get('type') == 'os.heat.stack':
            spec['properties'] = senlin_utils.process_stack_spec(
                spec['properties'])

        profile = client.validate_profile(spec=spec)

        formatters = {
            'metadata': senlin_utils.json_formatter,
            'spec': senlin_utils.nested_dict_formatter(
                ['type', 'version', 'properties'],
                ['property', 'value']),
        }
        columns = [
            'created_at',
            'domain',
            'id',
            'metadata',
            'name',
            'project_id',
            'spec',
            'type',
            'updated_at',
            'user_id'
        ]
        return columns, utils.get_dict_properties(profile.to_dict(), columns,
                                                  formatters=formatters)
| {
"content_hash": "ce97f878a17b2fc93cdbdcd04a7f4884",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 79,
"avg_line_length": 34.76123595505618,
"alnum_prop": 0.5465858585858586,
"repo_name": "stackforge/python-senlinclient",
"id": "ac116cf7865122db1f1269920573ab9fe7bd411c",
"size": "12924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "senlinclient/v1/profile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "404602"
},
{
"name": "Shell",
"bytes": "3347"
}
],
"symlink_target": ""
} |
# Public API of this thread-backed stand-in for the multiprocessing package.
__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
import array
import itertools
import sys
from multiprocessing import TimeoutError, cpu_count
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event
from Queue import Queue
#
#
#
class DummyProcess(threading.Thread):
    """Thread-based drop-in replacement for multiprocessing.Process (Python 2)."""

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        # NOTE(review): the mutable {} default mirrors the stdlib Thread
        # signature; Thread.__init__ does not mutate it.
        threading.Thread.__init__(self, group, target, name, args, kwargs)
        self._pid = None  # threads have no separate pid
        self._children = weakref.WeakKeyDictionary()  # child "processes" started by this one
        self._start_called = False
        self._parent = current_process()  # the thread that created this one

    def start(self):
        # Must be started by the thread that created it, since that thread
        # was recorded as the parent at construction time.
        assert self._parent is current_process()
        self._start_called = True
        self._parent._children[self] = None
        threading.Thread.start(self)

    @property
    def exitcode(self):
        # Mimic Process.exitcode: 0 once the thread has run to completion,
        # None if it was never started or is still alive.
        if self._start_called and not self.is_alive():
            return 0
        else:
            return None

    if sys.version_info < (2, 6):
        # Python < 2.6 Thread lacked the ``name`` property; emulate it.
        name = property(threading.Thread.getName, threading.Thread.setName)
#
#
#
# Python 2 only: ``im_func`` unwraps the unbound method so it can be
# re-bound as a plain function attribute on this subclass.
class Condition(threading._Condition):
    # Alias kept for multiprocessing.Condition API compatibility.
    notify_all = threading._Condition.notify_all.im_func
#
#
#
# In this dummy implementation a "process" is just a thread.
Process = DummyProcess
current_process = threading.current_thread
# Give the main thread the same children bookkeeping DummyProcess instances get.
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
    """Return the list of this "process"'s children that are still alive.

    As a side effect, children that have finished are pruned from the
    parent's bookkeeping, mirroring multiprocessing.active_children().
    """
    registry = current_process()._children
    for child in list(registry):
        if not child.is_alive():
            registry.pop(child, None)
    return list(registry)
def freeze_support():
    """No-op: frozen-executable support is only needed for real processes."""
    pass
#
#
#
class Namespace(object):
    """Simple attribute container, mirroring multiprocessing's Namespace."""

    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        # Show only public attributes, sorted for a stable representation.
        shown = sorted(
            '%s=%r' % (key, val)
            for key, val in self.__dict__.items()
            if not key.startswith('_')
        )
        return 'Namespace(%s)' % ', '.join(shown)
# Expose the plain builtins where multiprocessing.Manager would hand out proxies.
dict = dict
list = list
def Array(typecode, sequence, lock=True):
    """Return a plain ``array.array``; ``lock`` is accepted but ignored."""
    return array.array(typecode, sequence)
class Value(object):
    """Mutable scalar holder mimicking multiprocessing.Value.

    ``lock`` is accepted for API compatibility but ignored; plain attribute
    access is sufficient between threads here.
    """

    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def __repr__(self):
        return '<%r(%r, %r)>' % (
            type(self).__name__, self._typecode, self._value)
def Manager():
    """Return this module itself, which provides the manager API directly."""
    return sys.modules[__name__]
def shutdown():
    """No-op: there is no separate manager process to shut down."""
    pass
def Pool(processes=None, initializer=None, initargs=()):
    """Return a thread pool exposing the multiprocessing.Pool interface."""
    # Imported lazily to avoid a circular import at module load time.
    from multiprocessing.pool import ThreadPool
    return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
| {
"content_hash": "5bd08ed73eeec8b2618f51c558a5b00a",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 79,
"avg_line_length": 24.0327868852459,
"alnum_prop": 0.6292633015006821,
"repo_name": "tsheasha/python-multiprocessing",
"id": "a6b1f8099ac0ae98372436a5ae8b94cfb3a0d236",
"size": "3101",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/multiprocessing/dummy/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "82347"
},
{
"name": "Makefile",
"bytes": "1064"
},
{
"name": "Python",
"bytes": "248998"
}
],
"symlink_target": ""
} |
import re
import os
import time
import requests
import MySQLdb
from bs4 import BeautifulSoup
import sys
# Python 2 hack: reload() restores the setdefaultencoding attribute that
# site.py deletes, then make utf-8 the default str<->unicode codec so the
# scraped titles can be encoded without explicit codec arguments.
reload(sys)
sys.setdefaultencoding('utf-8')
def get_titles(url):
    """Scrape paper titles from the given accepted-papers page and store them.

    Each title is taken from the <strong> tag inside every
    <tr class="lead"> row. The titles are inserted into MySQL with the
    conference year/month/name hard-coded below (AsiaCCS 2016-05).
    Non-200 responses are silently ignored.
    """
    response = requests.get(url)
    if response.status_code == 200:
        html = BeautifulSoup(response.content, 'lxml')
        title_list = []
        for i in html.find_all('tr', class_='lead'):
            x = i.find('strong').text
            # Python 2: encode unicode titles to byte strings, relying on
            # the setdefaultencoding('utf-8') hack above for the codec.
            if isinstance(x, unicode):
                x = x.encode()
            title_list.append(x)
        insert_mysql('2016', '05', 'AsiaCCS', title_list)
def insert_mysql(year, month, conf_name, papers):
    """Insert one row per paper title into conference.papertitle.

    :param year: publication year string, e.g. '2016'
    :param month: publication month string, e.g. '05'
    :param conf_name: conference name stored with every row
    :param papers: list of title strings
    Commits once after all inserts; on a MySQL error the message is printed
    and nothing is committed.
    """
    try:
        tablename = 'papertitle'
        # NOTE(review): credentials are hard-coded placeholders; move them
        # to configuration before real use.
        conn = MySQLdb.connect(host="127.0.0.1", user="root", passwd="******", db="conference")
        c = conn.cursor()
        for p in papers:
            # Parameterized query: MySQLdb escapes each %s placeholder.
            sql = "insert into "+tablename+"(year,month,name,title) values(%s,%s,%s,%s)"
            param = (year, month, conf_name, p)
            c.execute(sql, param)
            print "insert..."
        conn.commit()
        c.close()
    except MySQLdb.Error, e:
        print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def main():
    # Single hard-coded crawl target: the AsiaCCS 2016 paper-list mirror.
    get_titles("http://meeting.xidian.edu.cn/conference/AsiaCCS2016/papers.html")
if __name__ == '__main__':
main() | {
"content_hash": "72ffaf95fbc24c0f6414b78f43a00fa3",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 95,
"avg_line_length": 28,
"alnum_prop": 0.5933441558441559,
"repo_name": "dcclogin/TextGenerator",
"id": "cc09c3d534e70a100696a056dd1fba2133e750f3",
"size": "1256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TitleCrawler/examples/asiaccs2016.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "314159"
}
],
"symlink_target": ""
} |
import bvpl_batch
import time
import os
#time.sleep(30);
# Register all bvpl processes and datatypes with the batch runtime before use.
bvpl_batch.register_processes();
bvpl_batch.register_datatypes();
class dbvalue:
    """Lightweight handle pairing a batch-database record id with its type name."""

    def __init__(self, index, type):
        self.type = type   # string type name reported by the batch runtime
        self.id = index    # unsigned integer record id
# --- Stage toggles for the pipeline below ---
find_corners = 1;
pair_corners = 1;
save_corners_vrml = 1;
save_pairs_vrml = 1;

# Input data location and 2-d corner kernel geometry (half-sizes, in voxels).
data_dir = "/Users/isa/Experiments/CapitolSFM/few_windows"
corner_length = 3;
corner_width = 3;
corner_thickness =1;

# Kernel direction set with its matching output directory and corner count.
directions = "main_corners"
output_dir = "/Users/isa/Experiments/CapitolSFM/few_windows/corner2d_coexist/main_corners_331_n"
num_corners=4;
# Alternative direction sets (enable one group, disable the others):
#directions = "main_plane"
#output_dir = "/Users/isa/Experiments/CapitolSFM/few_windows/corner2d_coexist/main_plane_331"
#num_corners = 8;
#directions = "all_corners"
#output_dir = "/Users/isa/Experiments/CapitolSFM/few_windows/corner2d_coexist/all_corners_331"
#num_corners = 104;#112;

# Make sure the output directory exists before any stage writes to it.
if not os.path.isdir( output_dir + "/"):
    os.mkdir( output_dir + "/");
if (find_corners):
    # Stage 1: load the appearance grid, build the 2-d corner kernels,
    # convolve, and keep only the strongest response per voxel.
    print("Load Voxel Grid");
    bvpl_batch.init_process("bvxmLoadGridProcess");
    bvpl_batch.set_input_string(0, data_dir +"/KL_gaussf1.vox");
    bvpl_batch.set_input_string(1,"bsta_gauss_f1");
    bvpl_batch.run_process();
    (world_id,world_type)= bvpl_batch.commit_output(0);
    app_grid = dbvalue(world_id,world_type);

    print("Creating corner 2d kernel");
    bvpl_batch.init_process("bvplCreateCorner2dKernelVectorProcess");
    bvpl_batch.set_input_unsigned(0, corner_length); #half length
    bvpl_batch.set_input_unsigned(1, corner_width); #half width
    bvpl_batch.set_input_unsigned(2, corner_thickness); #half thickness
    bvpl_batch.set_input_string(3, directions);
    bvpl_batch.run_process();
    (kernel_id,kernel_type)= bvpl_batch.commit_output(0);
    corners_kernel_vector = dbvalue(kernel_id,kernel_type);

    # Convolve every kernel with the grid; outputs a per-voxel response
    # grid and a grid holding the id of the winning kernel.
    print("Running Kernels");
    bvpl_batch.init_process("bvplSuppressAndCombineProcess");
    bvpl_batch.set_input_from_db(0,app_grid );
    bvpl_batch.set_input_from_db(1,corners_kernel_vector);
    bvpl_batch.set_input_string(2,"bsta_gauss_f1");
    bvpl_batch.set_input_string(3,"negative_gauss_convolution");
    bvpl_batch.set_input_string(4, output_dir + "/KL_gaussf1_response.vox");
    bvpl_batch.set_input_string(5, output_dir + "/KL_gaussf1_id.vox");
    bvpl_batch.run_process();
    (all_resp_grid_id,all_resp_grid_type)= bvpl_batch.commit_output(0);
    all_resp_grid = dbvalue(all_resp_grid_id,all_resp_grid_type);
    (all_id_grid_id,all_id_grid_type)= bvpl_batch.commit_output(1);
    all_id_grid = dbvalue(all_id_grid_id, all_id_grid_type);

    # Keep only the top response (and its kernel id) at each voxel.
    print("Getting top response");
    bvpl_batch.init_process("bvplExtractTopResponseProcess");
    bvpl_batch.set_input_from_db(0,all_resp_grid );
    bvpl_batch.set_input_from_db(1,all_id_grid);
    bvpl_batch.set_input_unsigned(2,0);
    bvpl_batch.set_input_string(3, output_dir + "/KL_top_resp.vox");
    bvpl_batch.set_input_string(4, output_dir + "/KL_top_id.vox");
    bvpl_batch.run_process();
    (response_grid_id,response_grid_type)= bvpl_batch.commit_output(0);
    response_grid = dbvalue(response_grid_id,response_grid_type);
    (id_grid_id,id_grid_type)= bvpl_batch.commit_output(1);
    id_grid = dbvalue(id_grid_id,id_grid_type);
if pair_corners:
    # Stage 2: build "wide corner" (WC) search kernels and pair up corner
    # detections that co-exist geometrically.
    print("Creating kernels to search for corners");
    bvpl_batch.init_process("bvplCreateWCKernelVectorProcess");
    bvpl_batch.set_input_unsigned(0, 3); #half length
    bvpl_batch.set_input_unsigned(1, 4); #half width
    bvpl_batch.set_input_unsigned(2, 2); #half thickness
    bvpl_batch.set_input_string(3, directions);
    bvpl_batch.run_process();
    (kernel_id,kernel_type)= bvpl_batch.commit_output(0);
    wc_kernel_vector = dbvalue(kernel_id,kernel_type);

    # Match corner detections from stage 1 against the search kernels.
    print("Searching for corners");
    bvpl_batch.init_process("bvplFindCornerPairsProcess");
    bvpl_batch.set_input_from_db(0,id_grid );
    bvpl_batch.set_input_from_db(1,response_grid );
    bvpl_batch.set_input_from_db(2,wc_kernel_vector);
    bvpl_batch.set_input_from_db(3,corners_kernel_vector);
    bvpl_batch.run_process();
    (pairs_id,pairs_type)= bvpl_batch.commit_output(0);
    pairs = dbvalue(pairs_id,pairs_type);
if save_corners_vrml :
    # Stage 3a: color-code responses by winning kernel id and write a
    # VRML scene for visual inspection.
    print("Converting ID to Hue ");
    bvpl_batch.init_process("bvplConvertIdToHueProcess");
    bvpl_batch.set_input_from_db(0,id_grid );
    bvpl_batch.set_input_from_db(1,response_grid );
    bvpl_batch.set_input_from_db(2,corners_kernel_vector);
    bvpl_batch.set_input_string(3, output_dir + "/hue_KL.vox");
    bvpl_batch.set_input_string(4, output_dir + "/hue_KL.svg");
    bvpl_batch.run_process();
    (hue_grid_id,hue_grid_type)= bvpl_batch.commit_output(0);
    hue_grid = dbvalue(hue_grid_id,hue_grid_type);

    print("Writing Orientation Grid");
    bvpl_batch.init_process("bvxmSaveRGBAGridVrmlProcess");
    bvpl_batch.set_input_from_db(0,hue_grid);
    bvpl_batch.set_input_float(1,0.0);
    bvpl_batch.set_input_string(2,output_dir + "/all_lines.wrl");
    bvpl_batch.run_process();
if save_pairs_vrml :
    # Stage 3b: append each corner-pair class to the VRML file, stepping
    # the hue so every class gets a distinct color.
    hue = 0.125;
    for i in range(0,num_corners,1):
        print(i);
        print("Visualize pairs");
        bvpl_batch.init_process("bvplVisualizeCornerPairsProcess");
        bvpl_batch.set_input_from_db(0,pairs );
        bvpl_batch.set_input_unsigned(1,i);
        bvpl_batch.set_input_string(2,output_dir + "/all_lines.wrl");
        bvpl_batch.set_input_bool(3, 0);
        bvpl_batch.set_input_float(4, hue);
        bvpl_batch.run_process();
        hue = hue + 1.0/float(num_corners);

# Finally dump the raw top-response grid for external inspection.
# NOTE(review): indentation was lost in this dump; these last lines are
# assumed to run unconditionally — confirm against the original script.
print("Writing Response Grid");
bvpl_batch.init_process("bvxmSaveGridRawProcess");
bvpl_batch.set_input_from_db(0,response_grid);
bvpl_batch.set_input_string(1,output_dir + "/KL_resp.raw");
bvpl_batch.run_process();
| {
"content_hash": "aafe740a50623929368e76b3444f187c",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 96,
"avg_line_length": 35.15822784810127,
"alnum_prop": 0.70999099909991,
"repo_name": "mirestrepo/voxels-at-lems",
"id": "92db0d3bc89083abaffbe40a72089b7d31ff7295",
"size": "5645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bvpl/coexist/pair_appearance_corners.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1426982"
},
{
"name": "Shell",
"bytes": "360033"
},
{
"name": "TeX",
"bytes": "568"
},
{
"name": "nesC",
"bytes": "374"
}
],
"symlink_target": ""
} |
"""
pyrseas.dbobject.textsearch
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This defines eight classes: TSConfiguration, TSDictionary,
TSParser and TSTemplate derived from DbSchemaObject, and
TSConfigurationDict, TSDictionaryDict, TSParserDict and
TSTemplateDict derived from DbObjectDict.
"""
from pyrseas.dbobject import DbObjectDict, DbSchemaObject
from pyrseas.dbobject import commentable, ownable
class TSConfiguration(DbSchemaObject):
    """A text search configuration definition"""

    keylist = ['schema', 'name']
    objtype = "TEXT SEARCH CONFIGURATION"
    single_extern_file = True

    def to_map(self, no_owner):
        """Convert a text search configuration to a YAML-suitable format

        :return: dictionary
        """
        mapping = self._base_map(no_owner)
        # Strip the schema qualification from the parser name when the
        # parser lives in the same schema as the configuration itself.
        if '.' in self.parser:
            (parser_schema, parser_name) = self.parser.split('.')
            if parser_schema == self.schema:
                mapping['parser'] = parser_name
        return mapping

    @commentable
    @ownable
    def create(self):
        """Return SQL statements to CREATE the configuration

        :return: SQL statements
        """
        options = ["PARSER = %s" % self.parser]
        return ["CREATE TEXT SEARCH CONFIGURATION %s (\n %s)" % (
                self.qualname(), ',\n '.join(options))]
class TSConfigurationDict(DbObjectDict):
    "The collection of text search configurations in a database"

    cls = TSConfiguration
    # Fetch every non-system configuration with its owner, (schema-qualified)
    # parser and comment.
    query = \
        """SELECT nc.nspname AS schema, cfgname AS name, rolname AS owner,
               np.nspname || '.' || prsname AS parser,
               obj_description(c.oid, 'pg_catalog.pg_ts_config') AS description
        FROM pg_catalog.pg_ts_config c
             JOIN pg_roles r ON (r.oid = cfgowner)
             JOIN pg_catalog.pg_ts_parser p ON (cfgparser = p.oid)
             JOIN pg_namespace nc ON (cfgnamespace = nc.oid)
             JOIN pg_namespace np ON (prsnamespace = np.oid)
        WHERE (nc.nspname != 'pg_catalog'
               AND nc.nspname != 'information_schema')
        ORDER BY nc.nspname, cfgname"""

    def from_map(self, schema, inconfigs):
        """Initialize the dictionary of configs by examining the input map

        :param schema: schema owning the configurations
        :param inconfigs: input YAML map defining the configurations
        """
        for key in inconfigs:
            if not key.startswith('text search configuration '):
                raise KeyError("Unrecognized object type: %s" % key)
            tsc = key[26:]
            # Objects are keyed by (schema name, object name) tuples.
            self[(schema.name, tsc)] = config = TSConfiguration(
                schema=schema.name, name=tsc)
            inconfig = inconfigs[key]
            if inconfig:
                for attr, val in list(inconfig.items()):
                    setattr(config, attr, val)
                if 'oldname' in inconfig:
                    config.oldname = inconfig['oldname']
                    del inconfig['oldname']
                if 'description' in inconfig:
                    config.description = inconfig['description']

    def diff_map(self, inconfigs):
        """Generate SQL to transform existing configurations

        :param inconfigs: a YAML map defining the new configurations
        :return: list of SQL statements

        Compares the existing configuration definitions, as fetched from the
        catalogs, to the input map and generates SQL statements to
        transform the configurations accordingly.
        """
        stmts = []
        # check input configurations
        for (sch, tsc) in inconfigs:
            intsc = inconfigs[(sch, tsc)]
            # does it exist in the database?
            if (sch, tsc) in self:
                stmts.append(self[(sch, tsc)].diff_map(intsc))
            else:
                # check for possible RENAME
                if hasattr(intsc, 'oldname'):
                    oldname = intsc.oldname
                    try:
                        # BUG FIX: entries are keyed by (schema, name)
                        # tuples, so the old name must be looked up under
                        # the same schema; a bare string key never matched.
                        stmts.append(self[(sch, oldname)].rename(intsc.name))
                        del self[(sch, oldname)]
                    except KeyError as exc:
                        exc.args = ("Previous name '%s' for configuration "
                                    "'%s' not found" % (oldname, intsc.name), )
                        raise
                else:
                    # create new configuration
                    stmts.append(intsc.create())
        # check database configurations
        for (sch, tsc) in self:
            # if missing, drop it
            if (sch, tsc) not in inconfigs:
                stmts.append(self[(sch, tsc)].drop())
        return stmts
class TSDictionary(DbSchemaObject):
    """A text search dictionary definition"""

    keylist = ['schema', 'name']
    objtype = "TEXT SEARCH DICTIONARY"
    single_extern_file = True

    @commentable
    @ownable
    def create(self):
        """Return SQL statements to CREATE the dictionary

        :return: SQL statements
        """
        # TEMPLATE is mandatory; init options are appended verbatim.
        clause_list = ["TEMPLATE = %s" % self.template]
        if hasattr(self, 'options'):
            clause_list.append(self.options)
        return ["CREATE TEXT SEARCH DICTIONARY %s (\n %s)" % (
                self.qualname(), ',\n '.join(clause_list))]
class TSDictionaryDict(DbObjectDict):
    "The collection of text search dictionaries in a database"

    cls = TSDictionary
    # Fetch every non-system dictionary with owner, template, init options
    # and comment.
    query = \
        """SELECT nspname AS schema, dictname AS name, rolname AS owner,
               tmplname AS template, dictinitoption AS options,
               obj_description(d.oid, 'pg_catalog.pg_ts_dict') AS description
        FROM pg_catalog.pg_ts_dict d JOIN pg_catalog.pg_ts_template t ON (dicttemplate = t.oid)
             JOIN pg_roles r ON (r.oid = dictowner)
             JOIN pg_namespace n ON (dictnamespace = n.oid)
        WHERE (nspname != 'pg_catalog' AND nspname != 'information_schema')
        ORDER BY nspname, dictname"""

    def from_map(self, schema, indicts):
        """Initialize the dictionary of dictionaries by examining the input map

        :param schema: schema owning the dictionaries
        :param indicts: input YAML map defining the dictionaries
        """
        for key in indicts:
            if not key.startswith('text search dictionary '):
                raise KeyError("Unrecognized object type: %s" % key)
            tsd = key[23:]
            # Objects are keyed by (schema name, object name) tuples.
            self[(schema.name, tsd)] = tsdict = TSDictionary(
                schema=schema.name, name=tsd)
            indict = indicts[key]
            if indict:
                for attr, val in list(indict.items()):
                    setattr(tsdict, attr, val)
                if 'oldname' in indict:
                    tsdict.oldname = indict['oldname']
                    del indict['oldname']
                if 'description' in indict:
                    tsdict.description = indict['description']

    def diff_map(self, indicts):
        """Generate SQL to transform existing dictionaries

        :param indicts: a YAML map defining the new dictionaries
        :return: list of SQL statements

        Compares the existing dictionary definitions, as fetched from the
        catalogs, to the input map and generates SQL statements to
        transform the dictionaries accordingly.
        """
        stmts = []
        # check input dictionaries
        for (sch, tsd) in indicts:
            intsd = indicts[(sch, tsd)]
            # does it exist in the database?
            if (sch, tsd) in self:
                stmts.append(self[(sch, tsd)].diff_map(intsd))
            else:
                # check for possible RENAME
                if hasattr(intsd, 'oldname'):
                    oldname = intsd.oldname
                    try:
                        # BUG FIX: entries are keyed by (schema, name)
                        # tuples; look up the old name in the same schema.
                        stmts.append(self[(sch, oldname)].rename(intsd.name))
                        del self[(sch, oldname)]
                    except KeyError as exc:
                        exc.args = ("Previous name '%s' for dictionary '%s' "
                                    "not found" % (oldname, intsd.name), )
                        raise
                else:
                    # create new dictionary
                    stmts.append(intsd.create())
        # check database dictionaries
        for (sch, tsd) in self:
            # if missing, drop it
            if (sch, tsd) not in indicts:
                stmts.append(self[(sch, tsd)].drop())
        return stmts
class TSParser(DbSchemaObject):
    """A text search parser definition"""

    keylist = ['schema', 'name']
    objtype = "TEXT SEARCH PARSER"
    single_extern_file = True

    @commentable
    @ownable
    def create(self):
        """Return SQL statements to CREATE the parser

        :return: SQL statements
        """
        # The four support functions are mandatory; HEADLINE is optional.
        opts = ["%s = %s" % (attr.upper(), getattr(self, attr))
                for attr in ['start', 'gettoken', 'end', 'lextypes']]
        if hasattr(self, 'headline'):
            opts.append("HEADLINE = %s" % self.headline)
        return ["CREATE TEXT SEARCH PARSER %s (\n %s)" % (
                self.qualname(), ',\n '.join(opts))]
class TSParserDict(DbObjectDict):
    "The collection of text search parsers in a database"

    cls = TSParser
    # Fetch every non-system parser with its support functions and comment.
    query = \
        """SELECT nspname AS schema, prsname AS name,
               prsstart::regproc AS start, prstoken::regproc AS gettoken,
               prsend::regproc AS end, prslextype::regproc AS lextypes,
               prsheadline::regproc AS headline,
               obj_description(p.oid, 'pg_catalog.pg_ts_parser') AS description
        FROM pg_catalog.pg_ts_parser p
             JOIN pg_namespace n ON (prsnamespace = n.oid)
        WHERE (nspname != 'pg_catalog' AND nspname != 'information_schema')
        ORDER BY nspname, prsname"""

    def from_map(self, schema, inparsers):
        """Initialize the dictionary of parsers by examining the input map

        :param schema: schema owning the parsers
        :param inparsers: input YAML map defining the parsers
        """
        for key in inparsers:
            if not key.startswith('text search parser '):
                raise KeyError("Unrecognized object type: %s" % key)
            tsp = key[19:]
            # Objects are keyed by (schema name, object name) tuples.
            self[(schema.name, tsp)] = parser = TSParser(
                schema=schema.name, name=tsp)
            inparser = inparsers[key]
            if inparser:
                for attr, val in list(inparser.items()):
                    setattr(parser, attr, val)
                if 'oldname' in inparser:
                    parser.oldname = inparser['oldname']
                    del inparser['oldname']
                if 'description' in inparser:
                    parser.description = inparser['description']

    def diff_map(self, inparsers):
        """Generate SQL to transform existing parsers

        :param inparsers: a YAML map defining the new parsers
        :return: list of SQL statements

        Compares the existing parser definitions, as fetched from the
        catalogs, to the input map and generates SQL statements to
        transform the parsers accordingly.
        """
        stmts = []
        # check input parsers
        for (sch, tsp) in inparsers:
            intsp = inparsers[(sch, tsp)]
            # does it exist in the database?
            if (sch, tsp) in self:
                stmts.append(self[(sch, tsp)].diff_map(intsp))
            else:
                # check for possible RENAME
                if hasattr(intsp, 'oldname'):
                    oldname = intsp.oldname
                    try:
                        # BUG FIX: entries are keyed by (schema, name)
                        # tuples; look up the old name in the same schema.
                        stmts.append(self[(sch, oldname)].rename(intsp.name))
                        del self[(sch, oldname)]
                    except KeyError as exc:
                        exc.args = ("Previous name '%s' for parser '%s' "
                                    "not found" % (oldname, intsp.name), )
                        raise
                else:
                    # create new parser
                    stmts.append(intsp.create())
        # check database parsers
        for (sch, tsp) in self:
            # if missing, drop it
            if (sch, tsp) not in inparsers:
                stmts.append(self[(sch, tsp)].drop())
        return stmts
class TSTemplate(DbSchemaObject):
    """A text search template definition"""

    keylist = ['schema', 'name']
    objtype = "TEXT SEARCH TEMPLATE"
    single_extern_file = True

    # No @ownable: text search templates have no owner in PostgreSQL.
    @commentable
    def create(self):
        """Return SQL statements to CREATE the template

        :return: SQL statements
        """
        # INIT is optional; LEXIZE is required and always comes last.
        parts = []
        if hasattr(self, 'init'):
            parts.append("INIT = %s" % self.init)
        parts.append("LEXIZE = %s" % self.lexize)
        return ["CREATE TEXT SEARCH TEMPLATE %s (\n %s)" % (
                self.qualname(), ',\n '.join(parts))]
class TSTemplateDict(DbObjectDict):
    "The collection of text search templates in a database"

    cls = TSTemplate
    # Fetch every non-system template with its support functions and comment.
    query = \
        """SELECT nspname AS schema, tmplname AS name,
               tmplinit::regproc AS init, tmpllexize::regproc AS lexize,
               obj_description(p.oid, 'pg_catalog.pg_ts_template') AS description
        FROM pg_catalog.pg_ts_template p
             JOIN pg_namespace n ON (tmplnamespace = n.oid)
        WHERE (nspname != 'pg_catalog' AND nspname != 'information_schema')
        ORDER BY nspname, tmplname"""

    def from_map(self, schema, intemplates):
        """Initialize the dictionary of templates by examining the input map

        :param schema: schema owning the templates
        :param intemplates: input YAML map defining the templates
        """
        for key in intemplates:
            if not key.startswith('text search template '):
                raise KeyError("Unrecognized object type: %s" % key)
            tst = key[21:]
            # Objects are keyed by (schema name, object name) tuples.
            self[(schema.name, tst)] = template = TSTemplate(
                schema=schema.name, name=tst)
            intemplate = intemplates[key]
            if intemplate:
                for attr, val in list(intemplate.items()):
                    setattr(template, attr, val)
                if 'oldname' in intemplate:
                    template.oldname = intemplate['oldname']
                    del intemplate['oldname']
                if 'description' in intemplate:
                    template.description = intemplate['description']

    def diff_map(self, intemplates):
        """Generate SQL to transform existing templates

        :param intemplates: a YAML map defining the new templates
        :return: list of SQL statements

        Compares the existing template definitions, as fetched from the
        catalogs, to the input map and generates SQL statements to
        transform the templates accordingly.
        """
        stmts = []
        # check input templates
        for (sch, tst) in intemplates:
            intst = intemplates[(sch, tst)]
            # does it exist in the database?
            if (sch, tst) in self:
                stmts.append(self[(sch, tst)].diff_map(intst))
            else:
                # check for possible RENAME
                if hasattr(intst, 'oldname'):
                    oldname = intst.oldname
                    try:
                        # BUG FIX: entries are keyed by (schema, name)
                        # tuples; look up the old name in the same schema.
                        stmts.append(self[(sch, oldname)].rename(intst.name))
                        del self[(sch, oldname)]
                    except KeyError as exc:
                        exc.args = ("Previous name '%s' for template '%s' "
                                    "not found" % (oldname, intst.name), )
                        raise
                else:
                    # create new template
                    stmts.append(intst.create())
        # check database templates
        for (sch, tst) in self:
            # if missing, drop it
            if (sch, tst) not in intemplates:
                stmts.append(self[(sch, tst)].drop())
        return stmts
| {
"content_hash": "78055a8211e460db29d91a7123b878bd",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 98,
"avg_line_length": 38.86165048543689,
"alnum_prop": 0.5476859658984449,
"repo_name": "reedstrm/Pyrseas",
"id": "ba70d1f8c7f8dfbe5c53df358738d671b65dc337",
"size": "16035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrseas/dbobject/textsearch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "352"
},
{
"name": "PLpgSQL",
"bytes": "55358"
},
{
"name": "Python",
"bytes": "742133"
}
],
"symlink_target": ""
} |
from datetime import datetime
import numpy as np
import pytest
from pandas import DataFrame, Series
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import period_range
# The various methods we support

# Aggregations that only make sense when downsampling (many rows -> one).
downsample_methods = [
    "min",
    "max",
    "first",
    "last",
    "sum",
    "mean",
    "sem",
    "median",
    "prod",
    "var",
    "std",
    "ohlc",
    "quantile",
]
# Methods that are also meaningful when upsampling (one row -> many).
upsample_methods = ["count", "size"]
# Methods defined only on Series resamplers.
series_methods = ["nunique"]
# Every resample method exercised by the parametrized fixtures below.
resample_methods = downsample_methods + upsample_methods + series_methods
# Parametrized over every downsample aggregation name above.
@pytest.fixture(params=downsample_methods)
def downsample_method(request):
    """Fixture for parametrization of Grouper downsample methods."""
    return request.param


# Parametrized over every resample method name above.
@pytest.fixture(params=resample_methods)
def resample_method(request):
    """Fixture for parametrization of Grouper resample methods."""
    return request.param
@pytest.fixture
def simple_date_range_series():
    """
    Series with date range index and random data for test purposes.

    Returns a factory: ``factory(start, end, freq)`` builds the Series.
    """

    def _make(start, end, freq="D"):
        idx = date_range(start, end, freq=freq)
        return Series(np.random.randn(len(idx)), index=idx)

    return _make
@pytest.fixture
def simple_period_range_series():
    """
    Series with period range index and random data for test purposes.

    Returns a factory: ``factory(start, end, freq)`` builds the Series.
    """

    def _make(start, end, freq="D"):
        idx = period_range(start, end, freq=freq)
        return Series(np.random.randn(len(idx)), index=idx)

    return _make
@pytest.fixture
def _index_start():
    """Fixture for parametrization of index, series and frame."""
    # Default start timestamp for the generated indexes.
    return datetime(2005, 1, 1)


@pytest.fixture
def _index_end():
    """Fixture for parametrization of index, series and frame."""
    # Default end timestamp for the generated indexes.
    return datetime(2005, 1, 10)


@pytest.fixture
def _index_freq():
    """Fixture for parametrization of index, series and frame."""
    # Default frequency: calendar-daily.
    return "D"


@pytest.fixture
def _index_name():
    """Fixture for parametrization of index, series and frame."""
    # Indexes are unnamed unless a test module overrides this fixture.
    return None
@pytest.fixture
def index(_index_factory, _index_start, _index_end, _index_freq, _index_name):
    """
    Fixture for parametrization of date_range, period_range and
    timedelta_range indexes
    """
    # _index_factory is supplied by the test module (e.g. date_range), so
    # the same fixtures run against every index flavor.
    return _index_factory(_index_start, _index_end, freq=_index_freq, name=_index_name)
@pytest.fixture
def _static_values(index):
    """
    Fixture for parametrization of values used in parametrization of
    Series and DataFrames with date_range, period_range and
    timedelta_range indexes
    """
    # Deterministic 0..n-1 values, one per index entry.
    return np.arange(len(index))
@pytest.fixture
def _series_name():
    """
    Fixture for parametrization of Series name for Series used with
    date_range, period_range and timedelta_range indexes
    """
    # Series are unnamed unless a test module overrides this fixture.
    return None
@pytest.fixture
def series(index, _series_name, _static_values):
    """
    Fixture for parametrization of Series with date_range, period_range and
    timedelta_range indexes
    """
    return Series(_static_values, index=index, name=_series_name)
@pytest.fixture
def empty_series_dti(series):
    """
    Fixture for parametrization of empty Series with date_range,
    period_range and timedelta_range indexes
    """
    # Zero-length slice keeps the index dtype/freq of the full series.
    return series[:0]
@pytest.fixture
def frame(index, _series_name, _static_values):
    """
    Fixture for parametrization of DataFrame with date_range, period_range
    and timedelta_range indexes
    """
    # _series_name is intentionally unused; it is requested only so the
    # frame fixture is re-evaluated alongside the series fixture.
    return DataFrame({"value": _static_values}, index=index)
@pytest.fixture
def empty_frame_dti(series):
    """
    Empty DataFrame whose index is a zero-length slice of the ``series``
    fixture's date_range/period_range/timedelta_range index.
    """
    return DataFrame(index=series.index[:0])
@pytest.fixture(params=[Series, DataFrame])
def series_and_frame(request, series, frame):
    """
    Parametrized fixture yielding first the Series and then the
    DataFrame built over the same index.
    """
    by_type = {Series: series, DataFrame: frame}
    return by_type.get(request.param)
| {
"content_hash": "34cee9f80e476261b2f8d113495459b1",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 87,
"avg_line_length": 24.696428571428573,
"alnum_prop": 0.6861894432393347,
"repo_name": "jreback/pandas",
"id": "cb62263b885aa49601729256d34313afdf951092",
"size": "4149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/resample/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14930989"
},
{
"name": "Shell",
"bytes": "29317"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
from .errors import APIError
from .auth import Mac
from .client import Client
from .urls import rtmp_publish_url, rtmp_play_url, hls_play_url, hdl_play_url, snapshot_play_url
import conf | {
"content_hash": "c8d40270c11d733866e092d3c6471836",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 96,
"avg_line_length": 37.2,
"alnum_prop": 0.7903225806451613,
"repo_name": "fancl20/pili-python",
"id": "ecae01ada803bc5689acf8db479d3a721cd81a26",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pili/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10420"
}
],
"symlink_target": ""
} |
import Q.lua_executor as executor
import Q.utils as util
from Q import constants as q_consts
from Q.p_vector import PVector
from Q.p_reducer import PReducer
from Q.p_scalar import PScalar
import math
def call_lua_op(op_name, *args):
    """
    Invoke the named Q-lua function with the given arguments.

    Parameters:
        op_name: operation (Q-lua function) name (a string)
        args: positional arguments forwarded to the lua function

    Return:
        the python-wrapped result of the lua call, or None on failure
    """
    # convert the python objects to a lua table
    packed = util.pack_args(args)
    result = None
    try:
        lua_fn = executor.eval_lua(q_consts.lua_op_fn_str)
        result = lua_fn(op_name, packed)
    except Exception as exc:
        # TODO: Handle operator level failures properly
        print(str(exc))
    if result:
        # wrap the lua objects back into python objects
        result = util.wrap_output(op_name, result)
    return result
def __get_default_dtype(val_type):
    """Map a python scalar type (int/float) to the default Q dtype."""
    if val_type is int:
        return q_consts.int64
    if val_type is float:
        return q_consts.float64
    raise Exception("input element type %s is not supported" % val_type)
# ==============================================
def array(in_vals, dtype=None):
    """Build a Q vector from a python list/tuple (wrapper around Q.mk_col)."""
    assert type(in_vals) in (list, tuple)
    if not dtype:
        # infer the Q dtype from the first element
        dtype = __get_default_dtype(type(in_vals[0]))
    if dtype not in q_consts.supported_dtypes:
        raise Exception("dtype %s is not supported" % dtype)
    # convert in_vals to a lua table and delegate to the lua op
    lua_vals = util.to_table(in_vals)
    return call_lua_op(q_consts.MK_COL, lua_vals, dtype)
def full(shape, fill_value, dtype=None):
    """Create a constant vector of ``shape`` copies of ``fill_value``
    (wrapper around Q.const)."""
    assert type(shape) == int
    if not dtype:
        # infer the Q dtype from the fill value
        dtype = __get_default_dtype(type(fill_value))
    if dtype not in q_consts.supported_dtypes:
        raise Exception("dtype %s is not supported" % dtype)
    spec = {'val': fill_value, 'qtype': dtype, 'len': shape}
    return call_lua_op(q_consts.CONST, spec)
def zeros(shape, dtype=None):
    """Constant vector of zeros; delegates to full()."""
    return full(shape, 0, dtype)
def ones(shape, dtype=None):
    """Constant vector of ones; delegates to full()."""
    return full(shape, 1, dtype)
def arange(start=0, stop=None, step=1, dtype=None):
    """Create a sequence according to inputs, wrapper around Q.seq().

    Mirrors numpy.arange semantics: with a single argument the value is
    treated as ``stop`` and the sequence starts at 0.

    Parameters:
        start: first value of the sequence (default 0)
        stop: exclusive upper bound; when None, ``start`` becomes the
            bound and the sequence starts at 0
        step: spacing between values (default 1)
        dtype: Q dtype; inferred from the type of ``stop`` when omitted

    Return:
        result of the Q.seq() lua call
    """
    # BUG FIX: the previous guard was `if not stop:`, which also fired
    # for stop == 0 and silently rewrote e.g. arange(5, 0, -1) as
    # arange(0, 5, -1).  Only a genuinely missing (None) stop selects
    # the single-argument form.
    if stop is None:
        stop = start
        start = 0
    if not (type(stop) == int or type(stop) == float):
        raise Exception("stop value can't be %s" % type(stop))
    if not dtype:
        val_type = type(stop)
        dtype = __get_default_dtype(val_type)
    if dtype not in q_consts.supported_dtypes:
        raise Exception("dtype %s is not supported" % dtype)
    # number of elements, rounded up (same rule numpy.arange uses)
    length = math.ceil(float(stop - start) / step)
    # call wrapper function
    in_val = {'start': start, 'by': step, 'qtype': dtype, 'len': length}
    return call_lua_op(q_consts.SEQ, in_val)
def sqrt(vec):
    # TODO: not implemented yet -- placeholder for an element-wise sqrt op.
    pass


def exp(vec):
    # TODO: not implemented yet -- placeholder for an element-wise exp op.
    pass
| {
"content_hash": "9e2009f34db13be18230631465cddb21",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 76,
"avg_line_length": 26.99173553719008,
"alnum_prop": 0.6227801592161666,
"repo_name": "NerdWalletOSS/Q",
"id": "71edbd4da5085bd8fdf750fb1eb957ce0851e35a",
"size": "3266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experimental/python_q_wrapper/Q/q_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1528854"
},
{
"name": "C++",
"bytes": "11900"
},
{
"name": "CMake",
"bytes": "414"
},
{
"name": "CSS",
"bytes": "651"
},
{
"name": "Cuda",
"bytes": "4192"
},
{
"name": "HTML",
"bytes": "184009"
},
{
"name": "JavaScript",
"bytes": "12282"
},
{
"name": "Jupyter Notebook",
"bytes": "60539"
},
{
"name": "Lex",
"bytes": "5777"
},
{
"name": "Logos",
"bytes": "18046"
},
{
"name": "Lua",
"bytes": "2273456"
},
{
"name": "Makefile",
"bytes": "72536"
},
{
"name": "Perl",
"bytes": "3421"
},
{
"name": "Python",
"bytes": "121910"
},
{
"name": "R",
"bytes": "1071"
},
{
"name": "RPC",
"bytes": "5973"
},
{
"name": "Shell",
"bytes": "128156"
},
{
"name": "TeX",
"bytes": "819194"
},
{
"name": "Terra",
"bytes": "3360"
},
{
"name": "Vim script",
"bytes": "5911"
},
{
"name": "Yacc",
"bytes": "52645"
}
],
"symlink_target": ""
} |
"""
SlipStream Client
=====
Copyright (C) 2013 SixSq Sarl (sixsq.com)
=====
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import struct
import binascii
from Crypto.PublicKey.RSA import _RSAobj
from Crypto.Util.number import long_to_bytes
from Crypto.Util.py3compat import bord, bchr
def exportSSHKey(self):
    """Return this RSA key as an OpenSSH public-key line ('ssh-rsa <b64>').

    NOTE(review): Python 2 code -- it joins the 'ssh-rsa' str with the
    packed byte strings, which would fail on Python 3.
    """
    # big-endian byte representations of the public exponent and modulus
    eb = long_to_bytes(self.e)
    nb = long_to_bytes(self.n)
    # prepend a zero byte when the high bit is set, so the value is not
    # interpreted as negative in the SSH wire encoding
    if bord(eb[0]) & 0x80:
        eb = bchr(0x00) + eb
    if bord(nb[0]) & 0x80:
        nb = bchr(0x00) + nb
    # wire format: each field is prefixed with its length as a
    # big-endian uint32
    keyparts = ['ssh-rsa', eb, nb]
    keystring = ''.join([struct.pack(">I", len(kp)) + kp for kp in keyparts])
    # [:-1] strips the trailing newline that b2a_base64 appends
    return 'ssh-rsa ' + binascii.b2a_base64(keystring)[:-1]
def pyCryptoPatch():
    """Monkey-patch PyCrypto's RSA key class with the exportSSHKey() method."""
    _RSAobj.exportSSHKey = exportSSHKey
| {
"content_hash": "4b5a29bc840f02b9cd0e525ca2788f09",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 31.17948717948718,
"alnum_prop": 0.6998355263157895,
"repo_name": "slipstream/SlipStreamClient",
"id": "781a07e932df37dce95196ffbbfa1ca4a78cfacb",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/src/main/python/slipstream/utils/pyCryptoPatch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "865"
},
{
"name": "PowerShell",
"bytes": "144"
},
{
"name": "Python",
"bytes": "574723"
},
{
"name": "Shell",
"bytes": "6660"
}
],
"symlink_target": ""
} |
"""Implements PatchCamelyon data class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import task_adaptation.data.base as base
from task_adaptation.registry import Registry
import tensorflow_datasets as tfds
@Registry.register("data.patch_camelyon", "class")
class PatchCamelyonData(base.ImageTfdsData):
    """Provides PatchCamelyon data."""

    def __init__(self, data_dir=None):
        builder = tfds.builder("patch_camelyon:2.*.*", data_dir=data_dir)
        builder.download_and_prepare()

        # Mapping from our split names to the underlying TFDS split specs.
        tfds_splits = {
            "test": "test",
            "train": "train",
            "val": "validation",
            "trainval": "train+validation",
            "train800": "train[:800]",
            "val200": "validation[:200]",
            "train800val200": "train[:800]+validation[:200]",
        }

        # Example counts per split; the fixed-size splits are literals.
        info_splits = builder.info.splits
        num_samples_splits = {
            "test": info_splits["test"].num_examples,
            "train": info_splits["train"].num_examples,
            "val": info_splits["validation"].num_examples,
            "train800": 800,
            "val200": 200,
            "train800val200": 1000,
        }
        num_samples_splits["trainval"] = (
            num_samples_splits["train"] + num_samples_splits["val"])

        super(PatchCamelyonData, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            # Note: Export only image and label tensors with their original types.
            base_preprocess_fn=base.make_get_tensors_fn(["image", "label"]),
            num_classes=builder.info.features["label"].num_classes)
| {
"content_hash": "228558d01acd31e531f40ea87676547d",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 37.51020408163265,
"alnum_prop": 0.6550598476605005,
"repo_name": "google-research/task_adaptation",
"id": "4f495881065e63c21b5db62ea9ee70b07f370046",
"size": "2429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task_adaptation/data/patch_camelyon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "194475"
},
{
"name": "Shell",
"bytes": "2122"
}
],
"symlink_target": ""
} |
"""A module that provides rapt authentication errors."""
class ReauthError(Exception):
    """Root of the reauthentication exception hierarchy."""
    pass


class HttpAccessTokenRefreshError(Exception):
    """Error (with HTTP status) trying to refresh an expired access token."""

    def __init__(self, message, status=None):
        super(HttpAccessTokenRefreshError, self).__init__(message)
        # HTTP status code of the failed refresh, when known
        self.status = status


class ReauthUnattendedError(ReauthError):
    """Raised when a reauth challenge cannot be answered non-interactively."""

    def __init__(self):
        msg = ('Reauthentication challenge could not be answered because you are '
               'not in an interactive session.')
        super(ReauthUnattendedError, self).__init__(msg)


class ReauthFailError(ReauthError):
    """Raised when the reauthentication challenge failed."""

    def __init__(self, message=None):
        msg = 'Reauthentication challenge failed. {0}'.format(message)
        super(ReauthFailError, self).__init__(msg)


class ReauthAPIError(ReauthError):
    """Raised when the reauth API returned something we can't handle."""

    def __init__(self, api_error):
        msg = 'Reauthentication challenge failed due to API error: {0}.'.format(
            api_error)
        super(ReauthAPIError, self).__init__(msg)


class ReauthAccessTokenRefreshError(ReauthError):
    """Raised when an access token for reauthentication cannot be obtained."""

    def __init__(self, message=None, status=None):
        msg = 'Failed to get an access token for reauthentication. {0}'.format(
            message)
        super(ReauthAccessTokenRefreshError, self).__init__(msg)
        # HTTP status code associated with the failure, when known
        self.status = status
| {
"content_hash": "bd965666e51664cf533423dcdd903aea",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 33.40816326530612,
"alnum_prop": 0.6603543066585217,
"repo_name": "kawamon/hue",
"id": "06e99cbabdde3ce732b6f1eaa47b6a8b4987d3c2",
"size": "2234",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/google-reauth-python-0.1.0/google_reauth/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
import numpy as np
class mlp:
    """A Multi-Layer Perceptron (Python 2 code -- uses print statements).

    Implemented as a tied-weight network: only one weight matrix
    (weights1) is stored, and the hidden->output layer reuses its
    transpose (see mlpfwd), so the model behaves as an autoencoder
    whose encoder and decoder share weights.
    """
    def __init__(self,inputs,targets,nhidden,outtype='linear',hidtype='logistic'):
        """ Constructor.

        inputs/targets: 2-D arrays (rows = examples, cols = features).
        nhidden: number of hidden units.
        outtype/hidtype: activation names ('linear', 'logistic',
        'softmax' for output; 'linear', 'logistic', 'relu' for hidden).
        """
        # Set up network size
        self.nin = np.shape(inputs)[1]
        self.nout = np.shape(targets)[1]
        self.ndata = np.shape(inputs)[0]
        self.nhidden = nhidden
        self.outtype = outtype
        self.hidtype = hidtype
        # Initialise network: uniform in [-1/sqrt(nin), 1/sqrt(nin)].
        # weights1 is the only weight matrix (tied weights); hid_bias /
        # vis_bias are the hidden- and visible-layer bias rows.
        self.weights1 = (np.random.rand(self.nin,self.nhidden)-0.5)*2/np.sqrt(self.nin)
        self.hid_bias = (np.random.rand(1,self.nhidden)-0.5)*2/np.sqrt(self.nin)
        self.vis_bias = (np.random.rand(1,self.nin)-0.5)*2/np.sqrt(self.nin)
    def mlptrain(self,inputs,targets,eta,niterations,momentum=0.9):
        """ Train the network by gradient descent with momentum.

        eta: learning rate; niterations: number of full-batch passes.
        Prints the squared error every 100 iterations.
        """
        # temporary space for the weight changes
        w1_update = np.zeros((np.shape(self.weights1)))
        w2_update = np.zeros((np.shape(self.weights1.T)))
        # temporary space for the bias changes
        hid_bias_update = np.zeros((np.shape(self.hid_bias)))
        vis_bias_update = np.zeros((np.shape(self.vis_bias)))
        for n in range(niterations):
            self.outputs = self.mlpfwd(inputs)
            # sum-of-squares error over the whole batch
            error = 0.5*np.sum((self.outputs-targets)**2)
            if (np.mod(n,100)==0): print "Iteration: ",n, " Error: ",error
            # Different types of output neurons: delta at the output layer
            if self.outtype == 'linear':
                deltao = (self.outputs-targets)/self.ndata # why the division??
            elif self.outtype == 'logistic':
                deltao = (self.outputs-targets)*self.outputs*(1.0-self.outputs)
            elif self.outtype == 'softmax':
                deltao = (self.outputs-targets)*(self.outputs*(-self.outputs)+self.outputs)/self.ndata
            else:
                print "bogus outtype"
            # Back-propagate through the (tied) output weights to get the
            # hidden-layer delta.  Note np.transpose(self.weights1.T) is
            # just weights1 again.
            if self.hidtype == 'linear':
                deltah = np.dot(deltao,np.transpose(self.weights1.T))
            elif self.hidtype == 'relu':
                deltah = np.maximum(0,np.sign(self.hidden)) * (np.dot(deltao,np.transpose(self.weights1.T)))
            elif self.hidtype == 'logistic':
                deltah = self.hidden*(1.0-self.hidden)*(np.dot(deltao,np.transpose(self.weights1.T)))
            else:
                print "bogus hidtype"
            # Momentum updates for each layer's gradient; because the
            # weights are tied, the two gradients are averaged below.
            w1_update = eta*(np.dot(np.transpose(inputs),deltah)) + momentum*w1_update
            w2_update = eta*(np.dot(np.transpose(self.hidden),deltao)) + momentum*w2_update
            update = 0.5*(w1_update + w2_update.T)
            self.weights1 -= update #+ 0.2*np.sign(self.weights1)
            hid_bias_update = eta*(np.sum(deltah,0)) + momentum*hid_bias_update
            self.hid_bias -= hid_bias_update
    def mlpfwd(self,inputs):
        """ Run the network forward; returns the output activations.

        Side effect: stores the hidden activations in self.hidden
        (needed by mlptrain's backward pass).
        """
        # hidden pre-activations
        hidden_psi = np.dot(inputs,self.weights1) + self.hid_bias
        # Different types of hidden neurons
        if self.hidtype == 'linear':
            self.hidden = hidden_psi
        elif self.hidtype == 'logistic':
            self.hidden = 1.0/(1.0+np.exp(-hidden_psi))
        elif self.hidtype == 'relu':
            self.hidden = np.maximum(0.0, hidden_psi)
        else:
            print "bogus hidtype"
        # output pre-activations reuse weights1 transposed (tied weights)
        linear_out = np.dot(self.hidden,self.weights1.T) + self.vis_bias
        # Different types of output neurons
        if self.outtype == 'linear':
            outputs = linear_out
        elif self.outtype == 'logistic':
            outputs = 1.0/(1.0+np.exp(-linear_out))
        elif self.outtype == 'softmax':
            normalisers = np.sum(np.exp(linear_out),axis=1)*np.ones((1,np.shape(linear_out)[0]))
            outputs = np.transpose(np.transpose(np.exp(linear_out))/normalisers)
        else:
            print "bogus outtype"
        return outputs
| {
"content_hash": "a2d8c548d14e5b330320689b1127042c",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 108,
"avg_line_length": 40.103092783505154,
"alnum_prop": 0.5634961439588689,
"repo_name": "garibaldu/multicauseRBM",
"id": "4d4c16326251c42c3689aeb85dfe55ec78a65dcf",
"size": "4348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Stephen/mlp_autoenc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9470565"
},
{
"name": "Python",
"bytes": "121462"
},
{
"name": "Shell",
"bytes": "608"
},
{
"name": "TeX",
"bytes": "232429"
}
],
"symlink_target": ""
} |
import numpy as np
from onnx.reference.op_run import OpRun
class Gather(OpRun):
    """ONNX Gather reference implementation: numpy.take along an axis."""

    def _run(self, x, indices, axis=None):  # type: ignore
        # Normalize both inputs to C-contiguous layout before np.take.
        if not x.flags["C_CONTIGUOUS"]:
            x = np.ascontiguousarray(x)
        if not indices.flags["C_CONTIGUOUS"]:
            # BUG FIX: ndarray has no `ascontiguousarray` method; the
            # previous `indices.ascontiguousarray()` raised
            # AttributeError.  Use the numpy module-level function.
            indices = np.ascontiguousarray(indices)
        if indices.size == 0:
            # Gathering with an empty index array yields an empty result.
            return (np.empty((0,), dtype=x.dtype),)
        try:
            return (np.take(x, indices, axis=axis),)
        except TypeError:
            # distribution x86 requires int32.
            return (np.take(x, indices.astype(int), axis=axis),)
| {
"content_hash": "7ce10f055bf42b27af45c83a8ae26475",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 64,
"avg_line_length": 33.55555555555556,
"alnum_prop": 0.5827814569536424,
"repo_name": "onnx/onnx",
"id": "0528f21f29552ddd742fff5b8d3ebc8e6ae8336e",
"size": "667",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onnx/reference/ops/op_gather.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "546"
},
{
"name": "C",
"bytes": "2062"
},
{
"name": "C++",
"bytes": "2003844"
},
{
"name": "CMake",
"bytes": "32553"
},
{
"name": "Jupyter Notebook",
"bytes": "29310"
},
{
"name": "PowerShell",
"bytes": "1157"
},
{
"name": "Python",
"bytes": "2073844"
},
{
"name": "Shell",
"bytes": "2918"
}
],
"symlink_target": ""
} |
"""
print_settings
==============
Django command similar to 'diffsettings' but shows all active Django settings.
"""
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from optparse import make_option
class Command(BaseCommand):
    """print_settings command: like `diffsettings` but prints every
    active Django setting, in json/yaml/pprint/simple format."""

    help = "Print the active Django settings."
    option_list = BaseCommand.option_list + (
        make_option('--format', default='simple', dest='format',
                    help='Specifies output format.'),
        make_option('--indent', default=4, dest='indent', type='int',
                    help='Specifies indent level for JSON and YAML'),
    )

    def handle(self, *args, **options):
        """Collect the selected settings (all of them, or only the names
        given as positional args) and print them in the chosen format."""
        a_dict = {}
        for attr in dir(settings):
            if self.include_attr(attr, args):
                a_dict[attr] = getattr(settings, attr)
        # Every explicitly requested setting name must exist.
        for setting in args:
            if setting not in a_dict:
                raise CommandError('%s not found in settings.' % setting)
        output_format = options.get('format', 'json')
        indent = options.get('indent', 4)
        if output_format == 'json':
            json = self.import_json()
            print(json.dumps(a_dict, indent=indent))
        elif output_format == 'yaml':
            import yaml  # requires PyYAML
            print(yaml.dump(a_dict, indent=indent))
        elif output_format == 'pprint':
            from pprint import pprint
            pprint(a_dict)
        else:
            self.print_simple(a_dict)

    @staticmethod
    def include_attr(attr, args):
        """Whether or not to include `attr` in the output.

        Dunder attributes are always excluded; when specific setting
        names were requested, only those names are included.
        """
        # BUG FIX: the original compared `args is not ()` -- an identity
        # test against a tuple literal (undefined behaviour, a
        # SyntaxWarning on modern Python) -- and fell through to an
        # implicit `return None` when args was non-empty but did not
        # contain attr.  Plain truthiness is correct and explicit.
        if attr.startswith('__'):
            return False
        return not args or attr in args

    @staticmethod
    def print_simple(a_dict):
        """A very simple `NAME = value` output format."""
        for key, value in a_dict.items():
            print('%-40s = %r' % (key, value))

    @staticmethod
    def import_json():
        """Import a JSON module: stdlib json, falling back to simplejson
        on very old Pythons."""
        try:
            import json
        except ImportError:
            import simplejson as json  # NOQA
        return json
| {
"content_hash": "70dd88bcadc9fcc385f715b710400989",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 28.575,
"alnum_prop": 0.55249343832021,
"repo_name": "edisonlz/fruit",
"id": "9a4f938c26a018a533700ac8416fea6c7b4f9a1e",
"size": "2286",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "web_project/base/site-packages/django_extensions/management/commands/print_settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1482"
},
{
"name": "Batchfile",
"bytes": "6714"
},
{
"name": "C",
"bytes": "3085"
},
{
"name": "C++",
"bytes": "4823"
},
{
"name": "CSS",
"bytes": "660927"
},
{
"name": "DIGITAL Command Language",
"bytes": "27853"
},
{
"name": "GAP",
"bytes": "6045"
},
{
"name": "Go",
"bytes": "13616"
},
{
"name": "Groff",
"bytes": "7199"
},
{
"name": "HTML",
"bytes": "7678961"
},
{
"name": "Java",
"bytes": "208173"
},
{
"name": "JavaScript",
"bytes": "2626051"
},
{
"name": "Makefile",
"bytes": "16810"
},
{
"name": "Nginx",
"bytes": "19215"
},
{
"name": "PHP",
"bytes": "205978"
},
{
"name": "Perl",
"bytes": "27627"
},
{
"name": "Python",
"bytes": "15609476"
},
{
"name": "Shell",
"bytes": "13663"
},
{
"name": "TeX",
"bytes": "60714"
}
],
"symlink_target": ""
} |
'''Tests for connection objects.'''
__author__ = 'Mark Roach (mrroach@google.com)'
import unittest
import urllib.request, urllib.error, urllib.parse
from io import StringIO
from pyactiveresource import connection
from pyactiveresource import util
from pyactiveresource.tests import http_fake
class Error(Exception):
    """Generic test-local exception used as a fake HTTP failure response."""
class ConnectionTest(unittest.TestCase):
    """Tests for pyactiveresource's Connection against a fake HTTP layer."""

    def setUp(self):
        '''Create test objects: canned XML payloads, the fake HTTP
        handler, and a Connection pointed at it.'''
        matz = {'id': 1, 'name': 'Matz'}
        david = {'id': 2, 'name': 'David'}
        self.matz = util.to_xml(matz, root='person')
        self.david = util.to_xml(david, root='person')
        self.people = util.to_xml([matz, david], root='people')
        self.people_single = util.to_xml(
            [matz], root='people-single-elements')
        self.people_empty = util.to_xml([], root='people-empty-elements')
        http_fake.initialize()
        self.http = http_fake.TestHandler
        self.http.site = 'http://localhost'
        self.http.set_response(Error('Bad request'))
        self.zero_length_content_headers = {'Content-Length': '0',
                                            'Content-Type': 'application/xml'}
        self.header = {'Key': 'value'}
        self.connection = connection.Connection(self.http.site)

    def assert_response_raises(self, error, code):
        """Fake an HTTP error with `code` and check _open raises `error`."""
        response = urllib.error.HTTPError('', code, '', {}, StringIO(''))
        self.http.set_response(response)
        self.assertRaises(error, self.connection._open, '', '')

    def test_handle_bad_request(self):
        # 400 is a bad request (e.g. malformed URI or missing request parameter)
        self.assert_response_raises(connection.BadRequest, 400)

    def test_handle_valid_response(self):
        # 2xx and 3xx are valid responses.
        for code in [200, 299, 300, 399]:
            response = http_fake.FakeResponse(code, str(code))
            self.http.set_response(response)
            # FIX: assertEquals is a deprecated alias of assertEqual.
            self.assertEqual(self.connection._open('', ''),
                             connection.Response(code, str(code)))

    def test_handle_unauthorized_access(self):
        # 401 is an unauthorized request
        self.assert_response_raises(connection.UnauthorizedAccess, 401)

    def test_handle_forbidden_access(self):
        # 403 is a forbidden requst (and authorizing will not help)
        self.assert_response_raises(connection.ForbiddenAccess, 403)

    def test_handle_resource_not_found(self):
        # 404 is a missing resource.
        self.assert_response_raises(connection.ResourceNotFound, 404)

    def test_handle_method_not_allowed(self):
        # 405 is a missing not allowed error
        self.assert_response_raises(connection.MethodNotAllowed, 405)

    def test_handle_resource_conflict(self):
        # 409 is an optimistic locking error
        self.assert_response_raises(connection.ResourceConflict, 409)

    def test_handle_resource_invalid(self):
        # 422 is a validation error
        self.assert_response_raises(connection.ResourceInvalid, 422)

    def test_handle_client_error(self):
        # 4xx are client errors.
        for code in [402, 499]:
            self.assert_response_raises(connection.ClientError, code)

    def test_handle_server_error(self):
        # 5xx are server errors.
        for code in [500, 599]:
            self.assert_response_raises(connection.ServerError, code)

    def test_handle_connection_error(self):
        # Others are unknown.
        for code in [199, 600]:
            self.assert_response_raises(connection.ConnectionError, code)

    def test_timeout_attribute(self):
        self.connection.timeout = 7
        self.assertEqual(7, self.connection.timeout)

    def test_initialize_raises_argument_error_on_missing_site(self):
        self.assertRaises(Exception, connection.Connection, None)

    def test_get(self):
        self.http.respond_to(
            'GET', 'http://localhost/people/1.xml', {}, self.matz)
        matz = self.connection.get('/people/1.xml')
        self.assertEqual(matz['name'], 'Matz')

    def test_head(self):
        self.http.respond_to('HEAD', 'http://localhost/people/1.xml', {}, '')
        self.assertFalse(self.connection.head('/people/1.xml').body)

    def test_get_with_header(self):
        self.http.respond_to(
            'GET', 'http://localhost/people/2.xml', self.header, self.david)
        david = self.connection.get('/people/2.xml', self.header)
        self.assertEqual(david['name'], 'David')

    def test_get_collection(self):
        self.http.respond_to('GET', '/people.xml', {}, self.people)
        people = self.connection.get('/people.xml')
        self.assertEqual('Matz', people[0]['name'])
        self.assertEqual('David', people[1]['name'])

    def test_get_collection_single(self):
        self.http.respond_to('GET', '/people_single_elements.xml', {},
                             self.people_single)
        people = self.connection.get('/people_single_elements.xml')
        self.assertEqual('Matz', people[0]['name'])

    def test_get_collection_empty(self):
        self.http.respond_to('GET', '/people_empty_elements.xml', {},
                             self.people_empty)
        people = self.connection.get('/people_empty_elements.xml')
        self.assertEqual([], people)

    def test_post(self):
        self.http.respond_to(
            'POST', '/people.xml', self.zero_length_content_headers,
            '', 200, {'Location': '/people/5.xml'})
        response = self.connection.post('/people.xml')
        self.assertEqual('/people/5.xml', response['Location'])

    def test_post_with_header(self):
        header = self.header
        header.update(self.zero_length_content_headers)
        self.http.respond_to(
            'POST', '/members.xml', self.header,
            '', 201, {'Location': '/people/6.xml'})
        response = self.connection.post('/members.xml', self.header)
        self.assertEqual('/people/6.xml', response['Location'])

    def test_put(self):
        self.http.respond_to('PUT', '/people/1.xml',
                             self.zero_length_content_headers, '', 204)
        response = self.connection.put('/people/1.xml')
        self.assertEqual(204, response.code)

    def test_put_with_header(self):
        header = self.header
        header.update(self.zero_length_content_headers)
        self.http.respond_to('PUT', '/people/2.xml', header, '', 204)
        response = self.connection.put('/people/2.xml', self.header)
        self.assertEqual(204, response.code)

    def test_delete(self):
        self.http.respond_to('DELETE', '/people/1.xml', {}, '')
        response = self.connection.delete('/people/1.xml')
        self.assertEqual(200, response.code)

    def test_delete_with_header(self):
        self.http.respond_to('DELETE', '/people/2.xml', self.header, '')
        response = self.connection.delete('/people/2.xml', self.header)
        self.assertEqual(200, response.code)
'''
ResponseHeaderStub = Struct.new(:code, :message, 'Allow')
def test_should_return_allowed_methods_for_method_no_allowed_exception
begin
handle_response ResponseHeaderStub.new(405, 'HTTP Failed...', 'GET, POST')
rescue connection.MethodNotAllowed => e
self.assertEqual('Failed with 405 HTTP Failed...', e.message
self.assertEqual([:get, :post], e.allowed_methods
uses_mocha('test_timeout') do
def test_timeout
@http = mock('new Net::HTTP')
self.connection.expects(:http).returns(@http)
@http.expects(:get).raises(Timeout::Error, 'execution expired')
assert_raises(connection.TimeoutError) { self.connection.get('/people_timeout.xml') }
'''
if __name__ == '__main__':
    # Run this module's test suite when executed directly.
    unittest.main()
| {
"content_hash": "670fefbe190239e01c0605be3a6fb282",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 91,
"avg_line_length": 38.88944723618091,
"alnum_prop": 0.6260498772451221,
"repo_name": "PiratenBayernIT/pyactiveresource",
"id": "ee93fffef92e527cc82e0fc2de5b347cae096d60",
"size": "7811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/connection_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120051"
}
],
"symlink_target": ""
} |
"""Manager to read and modify config data in JSON files."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import errno
import glob
import io
import json
import os
import copy
from traitlets.config import LoggingConfigurable
from traitlets.traitlets import Unicode, Bool
def recursive_update(target, new):
    """Recursively update one dictionary using another.

    None values will delete their keys; subdicts that become empty are
    pruned from the target.
    """
    for key, value in new.items():
        if isinstance(value, dict):
            branch = target.setdefault(key, {})
            recursive_update(branch, value)
            if not branch:
                # Prune empty subdicts
                del target[key]
        elif value is None:
            target.pop(key, None)
        else:
            target[key] = value
def remove_defaults(data, defaults):
    """Recursively remove items from dict that are already in defaults"""
    # iterate over a snapshot of the keys, since entries may be deleted
    for key in list(data):
        if key not in defaults:
            continue
        value = data[key]
        if isinstance(value, dict):
            remove_defaults(value, defaults[key])
            if not value:
                # prune subdicts that became empty
                del data[key]
        elif value == defaults[key]:
            del data[key]
class BaseJSONConfigManager(LoggingConfigurable):
    """General JSON config manager

    Persists configuration as ``{config_dir}/{section_name}.json``,
    optionally layering default values found in the files of a
    ``{section_name}.d`` directory underneath it.
    """

    # Directory in which the JSON files live.
    config_dir = Unicode('.')
    # Whether to also read defaults from the ``{section_name}.d`` directory.
    read_directory = Bool(True)

    def ensure_config_dir_exists(self):
        """Will try to create the config_dir directory."""
        try:
            os.makedirs(self.config_dir, 0o755)
        except OSError as e:
            # Already existing is fine; anything else is a real error.
            if e.errno != errno.EEXIST:
                raise

    def file_name(self, section_name):
        """Returns the json filename for the section_name: {config_dir}/{section_name}.json"""
        return os.path.join(self.config_dir, section_name + '.json')

    def directory(self, section_name):
        """Returns the directory name for the section name: {config_dir}/{section_name}.d"""
        return os.path.join(self.config_dir, section_name + '.d')

    def get(self, section_name, include_root=True):
        """Retrieve the config data for the specified section.

        Returns the data as a dictionary, or an empty dictionary if no
        file exists.  When ``include_root`` is False the root ``.json``
        file is skipped, effectively returning only the default values.
        """
        paths = []
        if include_root:
            paths.append(self.file_name(section_name))
        if self.read_directory:
            pattern = os.path.join(self.directory(section_name), '*.json')
            # The .d files are processed first so that {section_name}.json
            # (presumably the user's own configuration) takes precedence
            # over anything a package installed into the .d directory.
            paths = sorted(glob.glob(pattern)) + paths
        self.log.debug('Paths used for configuration of %s: \n\t%s', section_name, '\n\t'.join(paths))
        data = {}
        for path in paths:
            if os.path.isfile(path):
                with io.open(path, encoding='utf-8') as f:
                    recursive_update(data, json.load(f))
        return data

    def set(self, section_name, data):
        """Store the given config data for the section."""
        filename = self.file_name(section_name)
        self.ensure_config_dir_exists()

        if self.read_directory:
            # remove_defaults mutates its argument, so work on a copy.
            data = copy.deepcopy(data)
            remove_defaults(data, self.get(section_name, include_root=False))

        # Serialize up front: if dumping raises, no half-written file is
        # left on disk.
        json_content = json.dumps(data, indent=2)
        with io.open(filename, 'w', encoding='utf-8') as f:
            f.write(json_content)

    def update(self, section_name, new_data):
        """Modify the config section by recursively updating it with new_data.

        Returns the modified config data as a dictionary.
        """
        data = self.get(section_name)
        recursive_update(data, new_data)
        self.set(section_name, data)
        return data
| {
"content_hash": "064dcd375fb4b0f641b37ad735295233",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 102,
"avg_line_length": 34.696969696969695,
"alnum_prop": 0.6063318777292577,
"repo_name": "sserrot/champion_relationships",
"id": "a15c2454b6743efb9ba1049d4cda4a9baad421d9",
"size": "4596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/notebook/config_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
class WKTAdapter(object):
    """
    Adapts a geometry to its WKT representation for the MySQL and
    Oracle database backends.
    """

    def __init__(self, geom):
        # Capture only what the backends need: the WKT text and the SRID.
        self.wkt = geom.wkt
        self.srid = geom.srid

    def __eq__(self, other):
        # Comparable only with other WKTAdapter instances.
        return (isinstance(other, WKTAdapter) and
                self.wkt == other.wkt and self.srid == other.srid)

    def __hash__(self):
        return hash((self.wkt, self.srid))

    def __str__(self):
        return self.wkt
| {
"content_hash": "b86b2164e3b7e1e63d634c79fff4dedd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 27.736842105263158,
"alnum_prop": 0.5578747628083491,
"repo_name": "yephper/django",
"id": "6d3212f928c2a86fa2d854d8ba31b91e2005a47e",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/db/backends/base/adapter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
"""This module contains Splittable DoFn logic that is specific to DirectRunner.
"""
# pytype: skip-file
import uuid
from threading import Lock
from threading import Timer
from typing import TYPE_CHECKING
from typing import Any
from typing import Iterable
from typing import Optional
import apache_beam as beam
from apache_beam import TimeDomain
from apache_beam import pvalue
from apache_beam.coders import typecoders
from apache_beam.pipeline import AppliedPTransform
from apache_beam.pipeline import PTransformOverride
from apache_beam.runners.common import DoFnContext
from apache_beam.runners.common import DoFnInvoker
from apache_beam.runners.common import DoFnSignature
from apache_beam.runners.common import OutputProcessor
from apache_beam.runners.direct.evaluation_context import DirectStepContext
from apache_beam.runners.direct.util import KeyedWorkItem
from apache_beam.runners.direct.watermark_manager import WatermarkManager
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.core import ProcessContinuation
from apache_beam.transforms.ptransform import PTransform
from apache_beam.transforms.trigger import _ReadModifyWriteStateTag
from apache_beam.utils.windowed_value import WindowedValue
if TYPE_CHECKING:
from apache_beam.iobase import WatermarkEstimator
class SplittableParDoOverride(PTransformOverride):
    """A transform override for ParDo transforms of SplittableDoFns.

    Replaces the ParDo transform with a SplittableParDo transform that
    performs SDF specific logic.
    """

    def matches(self, applied_ptransform):
        """Returns True only for ParDo transforms with a splittable DoFn."""
        assert isinstance(applied_ptransform, AppliedPTransform)
        transform = applied_ptransform.transform
        if isinstance(transform, ParDo):
            signature = DoFnSignature(transform.fn)
            return signature.is_splittable_dofn()
        # Fix: previously fell off the end and returned None for
        # non-ParDo transforms; return an explicit bool instead.
        return False

    def get_replacement_transform_for_applied_ptransform(
        self, applied_ptransform):
        """Returns the SplittableParDo replacement, or the transform itself
        when its DoFn turns out not to be splittable."""
        ptransform = applied_ptransform.transform
        assert isinstance(ptransform, ParDo)
        do_fn = ptransform.fn
        signature = DoFnSignature(do_fn)
        if signature.is_splittable_dofn():
            return SplittableParDo(ptransform)
        else:
            return ptransform
class SplittableParDo(PTransform):
    """A transform that processes a PCollection using a Splittable DoFn."""

    def __init__(self, ptransform):
        assert isinstance(ptransform, ParDo)
        self._ptransform = ptransform

    def expand(self, pcoll):
        do_fn = self._ptransform.fn
        signature = DoFnSignature(do_fn)
        restriction_coder = signature.get_restriction_coder()
        element_coder = typecoders.registry.get_coder(pcoll.element_type)

        # Pair each element with its restriction, split the restrictions,
        # explode windows and finally key everything by a random unique key.
        keyed = (
            pcoll
            | 'pair' >> ParDo(PairWithRestrictionFn(do_fn))
            | 'split' >> ParDo(SplitRestrictionFn(do_fn))
            | 'explode' >> ParDo(ExplodeWindowsFn())
            | 'random' >> ParDo(RandomUniqueKeyFn()))

        return keyed | ProcessKeyedElements(
            do_fn,
            element_coder,
            restriction_coder,
            pcoll.windowing,
            self._ptransform.args,
            self._ptransform.kwargs,
            self._ptransform.side_inputs)
class ElementAndRestriction(object):
    """A holder for an element, restriction, and watermark estimator state."""

    def __init__(self, element, restriction, watermark_estimator_state):
        # Plain value object; attributes are read directly by consumers.
        (self.element,
         self.restriction,
         self.watermark_estimator_state) = (
             element, restriction, watermark_estimator_state)
class PairWithRestrictionFn(beam.DoFn):
    """A transform that pairs each element with a restriction."""

    def __init__(self, do_fn):
        self._signature = DoFnSignature(do_fn)

    def start_bundle(self):
        # Recreated at the start of every bundle.
        self._invoker = DoFnInvoker.create_invoker(
            self._signature,
            output_processor=_NoneShallPassOutputProcessor(),
            process_invocation=False)

    def process(self, element, window=beam.DoFn.WindowParam, *args, **kwargs):
        restriction = self._invoker.invoke_initial_restriction(element)
        estimator_state = (
            self._signature.process_method.watermark_estimator_provider.
            initial_estimator_state(element, restriction))
        yield ElementAndRestriction(element, restriction, estimator_state)
class SplitRestrictionFn(beam.DoFn):
    """A transform that performs initial splitting of Splittable DoFn inputs."""

    def __init__(self, do_fn):
        self._do_fn = do_fn

    def start_bundle(self):
        # Recreated at the start of every bundle.
        self._invoker = DoFnInvoker.create_invoker(
            DoFnSignature(self._do_fn),
            output_processor=_NoneShallPassOutputProcessor(),
            process_invocation=False)

    def process(self, element_and_restriction, *args, **kwargs):
        pair = element_and_restriction
        # Every split part keeps the original element and estimator state.
        for part in self._invoker.invoke_split(pair.element, pair.restriction):
            yield ElementAndRestriction(
                pair.element, part, pair.watermark_estimator_state)
class ExplodeWindowsFn(beam.DoFn):
    """A transform that forces the runner to explode windows.

    This is done to make sure that a Splittable DoFn processes an element
    once for each of the windows that element belongs to.
    """

    def process(self, element, window=beam.DoFn.WindowParam, *args, **kwargs):
        # Declaring WindowParam in the signature is what triggers the
        # per-window processing; the element itself passes through.
        yield element
class RandomUniqueKeyFn(beam.DoFn):
    """A transform that assigns a unique key to each element."""

    def process(self, element, window=beam.DoFn.WindowParam, *args, **kwargs):
        # UUID collisions are rare enough to be ignored here.
        key = uuid.uuid4().bytes
        yield (key, element)
class ProcessKeyedElements(PTransform):
    """A primitive transform that performs SplittableDoFn magic.

    Input to this transform should be a PCollection of keyed
    ElementAndRestriction objects.
    """

    def __init__(
        self,
        sdf,
        element_coder,
        restriction_coder,
        windowing_strategy,
        ptransform_args,
        ptransform_kwargs,
        ptransform_side_inputs):
        # Everything is kept verbatim for the replacement transform
        # (see ProcessKeyedElementsViaKeyedWorkItemsOverride).
        (self.sdf,
         self.element_coder,
         self.restriction_coder,
         self.windowing_strategy) = (
             sdf, element_coder, restriction_coder, windowing_strategy)
        self.ptransform_args = ptransform_args
        self.ptransform_kwargs = ptransform_kwargs
        self.ptransform_side_inputs = ptransform_side_inputs

    def expand(self, pcoll):
        # Primitive transform: simply derives the output PCollection from
        # the input.
        return pvalue.PCollection.from_(pcoll)
class ProcessKeyedElementsViaKeyedWorkItemsOverride(PTransformOverride):
    """A transform override for ProcessElements transform."""

    def matches(self, applied_ptransform):
        transform = applied_ptransform.transform
        return isinstance(transform, ProcessKeyedElements)

    def get_replacement_transform_for_applied_ptransform(
        self, applied_ptransform):
        # Swap in the KeyedWorkItem-based implementation.
        return ProcessKeyedElementsViaKeyedWorkItems(
            applied_ptransform.transform)
class ProcessKeyedElementsViaKeyedWorkItems(PTransform):
    """A transform that processes Splittable DoFn input via KeyedWorkItems."""

    def __init__(self, process_keyed_elements_transform):
        self._process_keyed_elements_transform = process_keyed_elements_transform

    def expand(self, pcoll):
        source = self._process_keyed_elements_transform
        process_elements = ProcessElements(source)
        # Forward the original ParDo's argument lists onto the new transform.
        process_elements.args = source.ptransform_args
        process_elements.kwargs = source.ptransform_kwargs
        process_elements.side_inputs = source.ptransform_side_inputs
        return pcoll | beam.core.GroupByKey() | process_elements
class ProcessElements(PTransform):
    """A primitive transform for processing keyed elements or KeyedWorkItems.

    Will be evaluated by
    `runners.direct.transform_evaluator._ProcessElementsEvaluator`.
    """

    def __init__(self, process_keyed_elements_transform):
        self._process_keyed_elements_transform = process_keyed_elements_transform
        self.sdf = self._process_keyed_elements_transform.sdf

    def expand(self, pcoll):
        # Primitive transform: the evaluator supplies the real behavior.
        return pvalue.PCollection.from_(pcoll)

    def new_process_fn(self, sdf):
        transform = self._process_keyed_elements_transform
        return ProcessFn(
            sdf, transform.ptransform_args, transform.ptransform_kwargs)
class ProcessFn(beam.DoFn):
    """A `DoFn` that executes machinery for invoking a Splittable `DoFn`.

    Input to the `ParDo` step that includes a `ProcessFn` will be a
    `PCollection` of `ElementAndRestriction` objects.

    This class is mainly responsible for the following:
    (1) setup environment for properly invoking a Splittable `DoFn`.
    (2) invoke `process()` method of a Splittable `DoFn`.
    (3) after the `process()` invocation of the Splittable `DoFn`, determine
        if a re-invocation of the element is needed. If this is the case, set
        state and a timer for a re-invocation and hold output watermark till
        this re-invocation.
    (4) after the final invocation of a given element clear any previous
        state set for re-invoking the element and release the output
        watermark.
    """

    def __init__(self, sdf, args_for_invoker, kwargs_for_invoker):
        self.sdf = sdf
        # Per-key, per-window state tags persisting progress between
        # (re)invocations of an element.
        self._element_tag = _ReadModifyWriteStateTag('element')
        self._restriction_tag = _ReadModifyWriteStateTag('restriction')
        self._watermark_state_tag = _ReadModifyWriteStateTag(
            'watermark_estimator_state')
        self.watermark_hold_tag = _ReadModifyWriteStateTag('watermark_hold')
        self._process_element_invoker = None
        self._output_processor = _OutputProcessor()
        self.sdf_invoker = DoFnInvoker.create_invoker(
            DoFnSignature(self.sdf),
            context=DoFnContext('unused_context'),
            output_processor=self._output_processor,
            input_args=args_for_invoker,
            input_kwargs=kwargs_for_invoker)
        self._step_context = None

    @property
    def step_context(self):
        return self._step_context

    @step_context.setter
    def step_context(self, step_context):
        assert isinstance(step_context, DirectStepContext)
        self._step_context = step_context

    def set_process_element_invoker(self, process_element_invoker):
        """Sets the invoker; must be called before `process()` runs."""
        assert isinstance(process_element_invoker, SDFProcessElementInvoker)
        self._process_element_invoker = process_element_invoker

    def process(
        self,
        element,
        timestamp=beam.DoFn.TimestampParam,
        window=beam.DoFn.WindowParam,
        *args,
        **kwargs):
        """Processes a keyed element, or a `KeyedWorkItem` on timer firings.

        Yields the outputs of the Splittable `DoFn` invocation for the
        current restriction (excluding the trailing
        `SDFProcessElementInvoker.Result`, which is consumed internally).
        """
        if isinstance(element, KeyedWorkItem):
            # Must be a timer firing.
            key = element.encoded_key
        else:
            key, values = element
            values = list(values)
            # Value here will either be a WindowedValue or an
            # ElementAndRestriction object.
            # TODO: handle key collisions here.
            # Fix: collapsed a duplicated bare assert and removed dead code
            # (`if len(values) != 1: raise ValueError('')`) that could never
            # run after this assertion.
            assert len(values) == 1, 'Internal error. Processing of splittable ' \
                                     'DoFn cannot continue since elements did not ' \
                                     'have unique keys.'
            value = values[0]

        state = self._step_context.get_keyed_state(key)
        element_state = state.get_state(window, self._element_tag)
        # Initially element_state is an empty list.
        is_seed_call = not element_state

        if not is_seed_call:
            # Re-invocation: restore the element, residual restriction and
            # estimator state saved by a previous call.
            element = state.get_state(window, self._element_tag)
            restriction = state.get_state(window, self._restriction_tag)
            watermark_estimator_state = state.get_state(
                window, self._watermark_state_tag)
            windowed_element = WindowedValue(element, timestamp, [window])
        else:
            # After values iterator is expanded above we should have gotten a
            # list with a single ElementAndRestriction object.
            assert isinstance(value, ElementAndRestriction)
            element_and_restriction = value
            element = element_and_restriction.element
            restriction = element_and_restriction.restriction
            watermark_estimator_state = (
                element_and_restriction.watermark_estimator_state)

            if isinstance(value, WindowedValue):
                windowed_element = WindowedValue(
                    element, value.timestamp, value.windows)
            else:
                windowed_element = WindowedValue(element, timestamp, [window])

        assert self._process_element_invoker
        assert isinstance(
            self._process_element_invoker, SDFProcessElementInvoker)

        output_values = self._process_element_invoker.invoke_process_element(
            self.sdf_invoker,
            self._output_processor,
            windowed_element,
            restriction,
            watermark_estimator_state,
            *args,
            **kwargs)

        sdf_result = None
        for output in output_values:
            if isinstance(output, SDFProcessElementInvoker.Result):
                # SDFProcessElementInvoker.Result should be the last item
                # yielded.
                sdf_result = output
                break
            yield output

        assert sdf_result, ('SDFProcessElementInvoker must return a '
                            'SDFProcessElementInvoker.Result object as the last '
                            'value of a SDF invoke_process_element() invocation.')

        if not sdf_result.residual_restriction:
            # All work for current residual and restriction pair is complete.
            state.clear_state(window, self._element_tag)
            state.clear_state(window, self._restriction_tag)
            state.clear_state(window, self._watermark_state_tag)
            # Releasing output watermark by setting it to positive infinity.
            state.add_state(
                window, self.watermark_hold_tag,
                WatermarkManager.WATERMARK_POS_INF)
        else:
            # Save progress so a later timer firing resumes from the
            # residual restriction.
            state.add_state(window, self._element_tag, element)
            state.add_state(
                window, self._restriction_tag,
                sdf_result.residual_restriction)
            state.add_state(
                window, self._watermark_state_tag, watermark_estimator_state)
            # Holding output watermark by setting it to negative infinity.
            state.add_state(
                window, self.watermark_hold_tag,
                WatermarkManager.WATERMARK_NEG_INF)
            # Setting a timer to be reinvoked to continue processing the
            # element. Currently Python SDK only supports setting timers
            # based on watermark. So forcing a reinvocation by setting a
            # timer for watermark negative infinity.
            # TODO(chamikara): update this by setting a timer for the proper
            # processing time when Python SDK supports that.
            state.set_timer(
                window, '', TimeDomain.WATERMARK,
                WatermarkManager.WATERMARK_NEG_INF)
class SDFProcessElementInvoker(object):
    """A utility that invokes SDF `process()` method and requests checkpoints.

    This class is responsible for invoking the `process()` method of a
    Splittable `DoFn` and making sure that invocation terminated properly.
    Based on the input configuration, this class may decide to request a
    checkpoint for a `process()` execution so that runner can process current
    output and resume the invocation at a later time.

    More specifically, when initializing a `SDFProcessElementInvoker`, caller
    may specify the number of output elements or processing time after which a
    checkpoint should be requested. This class is responsible for properly
    requesting a checkpoint based on either of these criteria.

    When the `process()` call of Splittable `DoFn` ends, this class performs
    validations to make sure that processing ended gracefully and returns a
    `SDFProcessElementInvoker.Result` that contains information which can be
    used by the caller to perform another `process()` invocation for the
    residual.

    A `process()` invocation may decide to give up processing voluntarily by
    returning a `ProcessContinuation` object (see documentation of
    `ProcessContinuation` for more details). So if a 'ProcessContinuation' is
    produced this class ends the execution and performs steps to finalize the
    current invocation.
    """

    class Result(object):
        def __init__(
            self,
            residual_restriction=None,
            process_continuation=None,
            future_output_watermark=None):
            """Returned as a result of a `invoke_process_element()` invocation.

            Args:
                residual_restriction: a restriction for the unprocessed part
                    of the element.
                process_continuation: a `ProcessContinuation` if one was
                    returned as the last element of the SDF `process()`
                    invocation.
                future_output_watermark: output watermark of the results that
                    will be produced when invoking the Splittable `DoFn` for
                    the current element with `residual_restriction`.
            """
            self.residual_restriction = residual_restriction
            self.process_continuation = process_continuation
            self.future_output_watermark = future_output_watermark

    def __init__(self, max_num_outputs, max_duration):
        """Args:
            max_num_outputs: checkpoint after this many outputs (falsy
                disables the output-count criterion).
            max_duration: seconds after which a `threading.Timer` triggers a
                checkpoint.
        """
        self._max_num_outputs = max_num_outputs
        self._max_duration = max_duration
        # Guards checkpoint initiation, which may race between the Timer
        # thread and the output-counting loop below.
        self._checkpoint_lock = Lock()
        # Fix: removed leftover debug stub `test_method()` that
        # unconditionally raised ValueError and was referenced nowhere.

    def invoke_process_element(
        self,
        sdf_invoker,
        output_processor,
        element,
        restriction,
        watermark_estimator_state,
        *args,
        **kwargs):
        """Invokes `process()` method of a Splittable `DoFn` for a given element.

        Args:
            sdf_invoker: a `DoFnInvoker` for the Splittable `DoFn`.
            output_processor: a `_OutputProcessor` capturing the invocation's
                output iterator.
            element: the element to process.
            restriction: the restriction describing the part of the element
                to process.
            watermark_estimator_state: initial watermark estimator state.

        Yields:
            the outputs of the `process()` invocation, followed by a
            `SDFProcessElementInvoker.Result` object as the final value.
        """
        assert isinstance(sdf_invoker, DoFnInvoker)

        class CheckpointState(object):
            def __init__(self):
                # Sentinel object once a checkpoint has been taken.
                self.checkpointed = None
                self.residual_restriction = None

        checkpoint_state = CheckpointState()

        def initiate_checkpoint():
            # May run on the Timer thread or inline from the output loop.
            with self._checkpoint_lock:
                if checkpoint_state.checkpointed:
                    return
                checkpoint_state.checkpointed = object()
            split = sdf_invoker.try_split(0)
            if split:
                _, checkpoint_state.residual_restriction = split
            else:
                # Clear the checkpoint if the split didn't happen. This
                # counters a very unlikely race condition that the Timer
                # attempted to initiate a checkpoint before invoke_process
                # set the current element allowing for another attempt to
                # checkpoint.
                checkpoint_state.checkpointed = None

        output_processor.reset()
        Timer(self._max_duration, initiate_checkpoint).start()
        sdf_invoker.invoke_process(
            element,
            additional_args=args,
            restriction=restriction,
            watermark_estimator_state=watermark_estimator_state)

        assert output_processor.output_iter is not None
        output_count = 0

        # We have to expand and re-yield here to support ending execution for
        # a given number of output elements as well as to capture the
        # ProcessContinuation if one was returned.
        process_continuation = None
        for output in output_processor.output_iter:
            # A ProcessContinuation, if returned, should be the last element.
            assert not process_continuation
            if isinstance(output, ProcessContinuation):
                # Taking a checkpoint so that we can determine primary and
                # residual restrictions.
                initiate_checkpoint()
                # A ProcessContinuation should always be the last element
                # produced by the output iterator.
                # TODO: support continuing after the specified amount of delay.
                # Continuing here instead of breaking to enforce that this is
                # the last element.
                process_continuation = output
                continue

            yield output
            output_count += 1
            if self._max_num_outputs and output_count >= self._max_num_outputs:
                initiate_checkpoint()

        result = (
            SDFProcessElementInvoker.Result(
                residual_restriction=checkpoint_state.residual_restriction)
            if checkpoint_state.residual_restriction else
            SDFProcessElementInvoker.Result())
        yield result
class _OutputProcessor(OutputProcessor):
    """Output processor that simply captures the raw output iterator."""

    def __init__(self):
        self.output_iter = None

    def process_outputs(
        self, windowed_input_element, output_iter, watermark_estimator=None):
        # type: (WindowedValue, Iterable[Any], Optional[WatermarkEstimator]) -> None
        # Hold on to the iterator; the caller drains it lazily.
        self.output_iter = output_iter

    def reset(self):
        # Forget any previously captured iterator.
        self.output_iter = None
class _NoneShallPassOutputProcessor(OutputProcessor):
    """Output processor for invokers that must never actually process output.

    Used with invokers created with ``process_invocation=False`` (see
    `PairWithRestrictionFn` / `SplitRestrictionFn`); being asked to process
    outputs is therefore a programming error.
    """
    def process_outputs(
        self, windowed_input_element, output_iter, watermark_estimator=None):
        # type: (WindowedValue, Iterable[Any], Optional[WatermarkEstimator]) -> None
        raise RuntimeError()
| {
"content_hash": "822d1e95152da5296d579f3c5ed57f1e",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 80,
"avg_line_length": 37.855287569573285,
"alnum_prop": 0.7102528915898844,
"repo_name": "axbaretto/beam",
"id": "381394ef7221bb4d0fdd5639cc8292a385493181",
"size": "21189",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/direct/sdf_direct_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2013 Rodrigo Baravalle
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from Algorithm import *
import Image
import numpy as np
from math import exp, log10
import scipy.ndimage.filters as sf
import matplotlib
from matplotlib import pyplot as plt
import scipy.signal
class MFS (Algorithm):
    """
    Multifractal spectrum (MFS) feature extractor for grayscale images.

    NOTE(review): this is Python 2 code — it relies on `map` returning a
    list, the py2 `Image` (PIL) import, and integer division (e.g. `k/2`).
    Porting to Python 3 would change behavior; left untouched here.

    :version: 1.0
    :author: Rodrigo Baravalle
    """
    def __init__(self):
        pass

    def setDef(self,ind,f,ite,filen):
        # parameters: ind -> determines how many levels are used when computing the density
        #                    choose 1 for using directly the image measurement im or
        #                    >= 6 for computing the density of im (quite stable for >=5)
        #             f ----> determines the dimension of MFS vector
        #             ite ---> determines how many levels are used when computing MFS for each
        # NOTE(review): `filen` is accepted but never used.
        self.ind_num = ind      # number of pixels for averaging
        self.f_num = f          # window
        self.ite_num = ite

    def gauss_kern(self,size, sizey):
        """ Returns a normalized 2D gauss kernel array for convolutions """
        m = np.float32(size)
        n = np.float32(sizey)
        # NOTE(review): sigma values appear empirically chosen per kernel
        # size — original comment was just '???'.
        sigma = 2;     # ???
        if(size <= 3): sigma = 1.5;
        if(size == 5): sigma = 2.5;
        # Symmetric coordinate grid centered on the kernel midpoint.
        y, x = np.mgrid[-(m-1)/2:(m-1)/2+1, -(n-1)/2:(n-1)/2+1]

        b = 2*(sigma**2)
        # py2 `map` returns nested lists here; np.sum/np.array re-box them.
        x2 = map(lambda i: map( lambda j: j**2,i), x)
        y2 = map(lambda i: map( lambda j: j**2,i), y)
        g = np.sum([x2,y2],axis=0).astype(np.float32)
        g = np.array(map(lambda i: map( lambda j: exp(-j/b),i), g)).astype(np.float32)
        # Normalize so the kernel sums to 1.
        return g / g.sum()

    def getFDs(self, filename):
        """
        Compute the multifractal dimension vector of an image.

        @param string filename : image location
        @return [float] : multi fractal dimentions
        @author: Rodrigo Baravalle. Code ported from Matlab
        """
        im = Image.open(filename)
        # Preprocessing: if IM is a color image convert it to a gray image
        im = im.convert("L")
        im = np.array(im.getdata()).reshape(im.size)

        #Using [0..255] to denote the intensity profile of the image
        grayscale_box =[0, 255];

        #Preprocessing: default intensity value of image ranges from 0 to 255
        if(abs(im).max()< 1):
            im = im * grayscale_box[1];

        #######################
        ### Estimating density function of the image
        ### by solving least squares for D in the equation
        ### log10(bw) = D*log10(c) + b
        r = 1.0/max(im.shape)
        c = np.dot(range(1,self.ind_num+1),r)
        c = map(lambda i: log10(i), c)
        bw = np.zeros((self.ind_num,im.shape[0],im.shape[1])).astype(np.float32)

        # bw[k] holds the image smoothed with a (k+1)-sized Gaussian kernel.
        bw[0] = im + 1

        k = 1
        if(self.ind_num > 1):
            bw[1] = scipy.signal.convolve2d(bw[0], self.gauss_kern(k+1,(k+1)),mode="full")[1:,1:]*((k+1)**2)

        for k in range(2,self.ind_num):
            temp = scipy.signal.convolve2d(bw[0], self.gauss_kern(k+1,(k+1)),mode="full")*((k+1)**2)
            # Crop the 'full' convolution back to the image size; the k==4
            # case uses a different offset (presumably an even/odd kernel
            # alignment fix — TODO confirm).
            if(k==4):
                bw[k] = temp[k-1-1:temp.shape[0]-(k/2),k-1-1:temp.shape[1]-(k/2)]
            else:
                bw[k] = temp[k-1:temp.shape[0]-(1),k-1:temp.shape[1]-(1)]

        bw = np.log10(bw)
        # Accumulate the normal-equation terms of the least-squares fit.
        n1 = c[0]*c[0]
        n2 = bw[0]*c[0]

        for k in range(1,self.ind_num):
            n1 = n1+c[k]*c[k]
            n2 = n2 + bw[k]*c[k]

        sum3 = bw[0]
        for i in range(1,self.ind_num):
            sum3 = sum3 + bw[i]

        if(self.ind_num >1):
            D = (n2*self.ind_num-sum(c)*sum3)/(n1*self.ind_num -sum(c)*sum(c));

        if (self.ind_num > 1):
            # Rescale the density estimate into the grayscale box.
            max_D = np.float32(4)
            min_D = np.float32(1)
            D = grayscale_box[1]*(D-min_D)/(max_D - min_D)+grayscale_box[0]
        else:
            D = im

        #Partition the density
        # throw away the boundary
        D = D[self.ind_num-1:D.shape[0]-self.ind_num+1, self.ind_num-1:D.shape[1]-self.ind_num+1]

        IM = np.zeros(D.shape)
        gap = np.ceil((grayscale_box[1] - grayscale_box[0])/np.float32(self.f_num));
        center = np.zeros(self.f_num);
        # Quantize density values into f_num bins, replacing each value by
        # its bin center.
        for k in range(1,self.f_num+1):
            bin_min = (k-1) * gap;
            bin_max = k * gap - 1;
            center[k-1] = round((bin_min + bin_max) / 2);
            D = ((D <= bin_max) & (D >= bin_min)).choose(D,center[k-1])

        D = ((D >= bin_max)).choose(D,0)
        D = ((D < 0)).choose(D,0)
        IM = D

        #Constructing the filter for approximating log fitting
        r = max(IM.shape)
        c = np.zeros(self.ite_num)
        c[0] = 1;
        for k in range(1,self.ite_num):
            c[k] = c[k-1]/(k+1)
        c = c / sum(c);

        #Construct level sets
        Idx_IM = np.zeros(IM.shape);
        for k in range(0,self.f_num):
            IM = (IM == center[k]).choose(IM,k+1)

        Idx_IM = IM
        IM = np.zeros(IM.shape)

        #Estimate MFS by box-counting
        num = np.zeros(self.ite_num)
        MFS = np.zeros(self.f_num)
        for k in range(1,self.f_num+1):
            # Binary mask of the k-th level set.
            IM = np.zeros(IM.shape)
            IM = (Idx_IM==k).choose(Idx_IM,255+k)
            IM = (IM<255+k).choose(IM,0)
            IM = (IM>0).choose(IM,1)
            temp = max(IM.sum(),1)
            num[0] = log10(temp)/log10(r);
            # Box-count at increasing box sizes j.
            for j in range(2,self.ite_num+1):
                mask = np.ones((j,j))
                bw = scipy.signal.convolve2d(IM, mask,mode="full")[1:,1:]
                indx = np.arange(0,IM.shape[0],j)
                indy = np.arange(0,IM.shape[1],j)
                bw = bw[np.ix_(indx,indy)]
                idx = (bw>0).sum()
                temp = max(idx,1)
                num[j-1] = log10(temp)/log10(r/j)
            # Weighted log-log slope estimate for this level set.
            MFS[k-1] = sum(c*num)

        return MFS
| {
"content_hash": "1b720566efbe1432e8dfce0b383d9619",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 108,
"avg_line_length": 35.472636815920396,
"alnum_prop": 0.5570827489481066,
"repo_name": "rbaravalle/imfractal",
"id": "7562343c57fe08bd4b58ab4123eb72c691112db0",
"size": "7130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Algorithm/MFS.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "852"
},
{
"name": "Matlab",
"bytes": "5974"
},
{
"name": "Python",
"bytes": "315258"
},
{
"name": "R",
"bytes": "1076"
}
],
"symlink_target": ""
} |
import re
import warnings
import numpy as np
from matplotlib import rcParams
from astropy import units as u
from astropy.units import UnitsError
from astropy.coordinates import Angle
# Recognized tick-label format specifications: sexagesimal degrees
# ('dd:mm:ss.s') and hours ('hh:mm:ss.s'), decimal degrees ('d.d'),
# arcminutes ('m.m'), arcseconds ('s.s'), and plain scalar ('x.x').
DMS_RE = re.compile('^dd(:mm(:ss(.(s)+)?)?)?$')
HMS_RE = re.compile('^hh(:mm(:ss(.(s)+)?)?)?$')
DDEC_RE = re.compile('^d(.(d)+)?$')
DMIN_RE = re.compile('^m(.(m)+)?$')
DSEC_RE = re.compile('^s(.(s)+)?$')
SCAL_RE = re.compile('^x(.(x)+)?$')

# Units with custom representations - see the note where it is used inside
# AngleFormatterLocator.formatter for more details.
CUSTOM_UNITS = {
    u.degree: u.def_unit('custom_degree', represents=u.degree,
                         format={'generic': '\xb0',
                                 'latex': r'^\circ',
                                 'unicode': '°'}),
    u.arcmin: u.def_unit('custom_arcmin', represents=u.arcmin,
                         format={'generic': "'",
                                 'latex': r'^\prime',
                                 'unicode': '′'}),
    u.arcsec: u.def_unit('custom_arcsec', represents=u.arcsec,
                         format={'generic': '"',
                                 'latex': r'^{\prime\prime}',
                                 'unicode': '″'}),
    u.hourangle: u.def_unit('custom_hourangle', represents=u.hourangle,
                            format={'generic': 'h',
                                    'latex': r'^\mathrm{h}',
                                    'unicode': r'$\mathregular{^h}$'})}
class BaseFormatterLocator:
    """
    A joint formatter/locator

    Holds the shared machinery for locating ticks along a coordinate axis.
    Exactly one of ``values`` (explicit tick positions), ``number``
    (target tick count) or ``spacing`` (fixed tick spacing) is active at a
    time; assigning one through its property clears the other two.
    """

    def __init__(self, values=None, number=None, spacing=None, format=None,
                 unit=None, format_unit=None):

        if len([x for x in (values, number, spacing) if x is None]) < 2:
            raise ValueError("At most one of values/number/spacing can be specifed")

        self._unit = unit
        # Labels may be rendered in a different unit than the axis unit.
        self._format_unit = format_unit or unit

        if values is not None:
            self.values = values
        elif number is not None:
            self.number = number
        elif spacing is not None:
            self.spacing = spacing
        else:
            # Default: aim for 5 ticks.
            self.number = 5

        self.format = format

    @property
    def values(self):
        return self._values

    @values.setter
    def values(self, values):
        if not isinstance(values, u.Quantity) or (not values.ndim == 1):
            raise TypeError("values should be an astropy.units.Quantity array")
        if not values.unit.is_equivalent(self._unit):
            raise UnitsError("value should be in units compatible with "
                             "coordinate units ({}) but found {}".format(self._unit, values.unit))
        # Setting explicit values deactivates the other two modes.
        self._number = None
        self._spacing = None
        self._values = values

    @property
    def number(self):
        return self._number

    @number.setter
    def number(self, number):
        # Setting a tick count deactivates the other two modes.
        self._number = number
        self._spacing = None
        self._values = None

    @property
    def spacing(self):
        return self._spacing

    @spacing.setter
    def spacing(self, spacing):
        # Setting a fixed spacing deactivates the other two modes.
        self._number = None
        self._spacing = spacing
        self._values = None

    def minor_locator(self, spacing, frequency, value_min, value_max):
        """Return minor tick positions given the major tick ``spacing`` and
        the number of minor intervals per major tick (``frequency``)."""
        if self.values is not None:
            # Explicit tick values: no minor ticks ([] * unit is an empty
            # Quantity).
            return [] * self._unit
        minor_spacing = spacing.value / frequency
        values = self._locate_values(value_min, value_max, minor_spacing)
        # Drop candidates that coincide with major ticks.
        # NOTE(review): index[0][0] assumes at least one candidate aligns
        # with a major tick; it would raise IndexError otherwise — confirm.
        index = np.where((values % frequency) == 0)
        index = index[0][0]
        values = np.delete(values, np.s_[index::frequency])
        return values * minor_spacing * self._unit

    @property
    def format_unit(self):
        return self._format_unit

    @format_unit.setter
    def format_unit(self, unit):
        self._format_unit = u.Unit(unit)

    @staticmethod
    def _locate_values(value_min, value_max, spacing):
        # Integer multiples of ``spacing`` lying within [value_min, value_max].
        imin = np.ceil(value_min / spacing)
        imax = np.floor(value_max / spacing)
        values = np.arange(imin, imax + 1, dtype=int)
        return values
class AngleFormatterLocator(BaseFormatterLocator):
    """
    A joint formatter/locator for angular coordinates.

    Extends the base class with sexagesimal/decimal formatting, custom
    separators, and angle-aware tick spacing selection.
    """
    def __init__(self, values=None, number=None, spacing=None, format=None,
                 unit=None, decimal=None, format_unit=None, show_decimal_unit=True):
        if unit is None:
            unit = u.degree
        if format_unit is None:
            format_unit = unit
        # Sexagesimal display only makes sense for degree- or hour-like units.
        if format_unit not in (u.degree, u.hourangle, u.hour):
            if decimal is False:
                raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
        self._decimal = decimal
        self._sep = None
        self.show_decimal_unit = show_decimal_unit
        super().__init__(values=values, number=number, spacing=spacing,
                         format=format, unit=unit, format_unit=format_unit)

    @property
    def decimal(self):
        # Resolve the effective decimal flag: if unset (None), decimal mode is
        # forced on for non-degree/hour units and off otherwise.
        decimal = self._decimal
        if self.format_unit not in (u.degree, u.hourangle, u.hour):
            if self._decimal is None:
                decimal = True
            elif self._decimal is False:
                raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
        elif self._decimal is None:
            decimal = False
        return decimal

    @decimal.setter
    def decimal(self, value):
        self._decimal = value

    @property
    def spacing(self):
        return self._spacing

    @spacing.setter
    def spacing(self, spacing):
        # Unlike the base class, require an angular Quantity.
        if spacing is not None and (not isinstance(spacing, u.Quantity) or
                                    spacing.unit.physical_type != 'angle'):
            raise TypeError("spacing should be an astropy.units.Quantity "
                            "instance with units of angle")
        self._number = None
        self._spacing = spacing
        self._values = None

    @property
    def sep(self):
        """Separator(s) between sexagesimal fields, or `None` for the default."""
        return self._sep

    @sep.setter
    def sep(self, separator):
        self._sep = separator

    @property
    def format(self):
        return self._format

    @format.setter
    def format(self, value):
        # Parse a format string such as 'dd:mm:ss.s' or 'd.dd' and derive the
        # decimal flag, format unit, number of fields and precision from it.
        self._format = value
        if value is None:
            return
        if DMS_RE.match(value) is not None:
            self._decimal = False
            self._format_unit = u.degree
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
                self._fields = 3
            else:
                self._precision = 0
                self._fields = value.count(':') + 1
        elif HMS_RE.match(value) is not None:
            self._decimal = False
            self._format_unit = u.hourangle
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
                self._fields = 3
            else:
                self._precision = 0
                self._fields = value.count(':') + 1
        elif DDEC_RE.match(value) is not None:
            self._decimal = True
            self._format_unit = u.degree
            self._fields = 1
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
        elif DMIN_RE.match(value) is not None:
            self._decimal = True
            self._format_unit = u.arcmin
            self._fields = 1
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
        elif DSEC_RE.match(value) is not None:
            self._decimal = True
            self._format_unit = u.arcsec
            self._fields = 1
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
        else:
            raise ValueError(f"Invalid format: {value}")
        # A previously-set spacing may now be finer than the format can
        # display: clamp it to the format's base spacing, then snap it to an
        # integer multiple of the base spacing.
        if self.spacing is not None and self.spacing < self.base_spacing:
            warnings.warn("Spacing is too small - resetting spacing to match format")
            self.spacing = self.base_spacing
        if self.spacing is not None:
            ratio = (self.spacing / self.base_spacing).decompose().value
            remainder = ratio - np.round(ratio)
            if abs(remainder) > 1.e-10:
                warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
                self.spacing = self.base_spacing * max(1, round(ratio))

    @property
    def base_spacing(self):
        # Smallest spacing representable by the current format, i.e. one unit
        # in the least-significant displayed field/decimal place.
        if self.decimal:
            spacing = self._format_unit / (10. ** self._precision)
        else:
            if self._fields == 1:
                spacing = 1. * u.degree
            elif self._fields == 2:
                spacing = 1. * u.arcmin
            elif self._fields == 3:
                if self._precision == 0:
                    spacing = 1. * u.arcsec
                else:
                    spacing = u.arcsec / (10. ** self._precision)
        # Sexagesimal hour fields are 15x their degree counterparts.
        if self._format_unit is u.hourangle:
            spacing *= 15
        return spacing

    def locator(self, value_min, value_max):
        """
        Return ``(tick_values, spacing)`` for the range [value_min, value_max],
        both as Quantities in the coordinate unit.
        """
        if self.values is not None:
            # values were manually specified
            return self.values, 1.1 * u.arcsec
        else:
            # In the special case where value_min is the same as value_max, we
            # don't locate any ticks. This can occur for example when taking a
            # slice for a cube (along the dimension sliced). We return a
            # non-zero spacing in case the caller needs to format a single
            # coordinate, e.g. for mouseover.
            if value_min == value_max:
                return [] * self._unit, 1 * u.arcsec
            if self.spacing is not None:
                # spacing was manually specified
                spacing_value = self.spacing.to_value(self._unit)
            elif self.number is not None:
                # number of ticks was specified, work out optimal spacing
                # first compute the exact spacing
                dv = abs(float(value_max - value_min)) / self.number * self._unit
                if self.format is not None and dv < self.base_spacing:
                    # if the spacing is less than the minimum spacing allowed by the format, simply
                    # use the format precision instead.
                    spacing_value = self.base_spacing.to_value(self._unit)
                else:
                    # otherwise we clip to the nearest 'sensible' spacing
                    if self.decimal:
                        from .utils import select_step_scalar
                        spacing_value = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
                    else:
                        if self._format_unit is u.degree:
                            from .utils import select_step_degree
                            spacing_value = select_step_degree(dv).to_value(self._unit)
                        else:
                            from .utils import select_step_hour
                            spacing_value = select_step_hour(dv).to_value(self._unit)
            # We now find the interval values as multiples of the spacing and
            # generate the tick positions from this.
            values = self._locate_values(value_min, value_max, spacing_value)
            return values * spacing_value * self._unit, spacing_value * self._unit

    def formatter(self, values, spacing, format='auto'):
        """
        Format tick ``values`` (a Quantity array) as strings, using ``spacing``
        to infer the precision when no explicit format was set. ``format`` is
        'auto', 'latex', 'unicode' or 'ascii'.
        """
        if not isinstance(values, u.Quantity) and values is not None:
            raise TypeError("values should be a Quantities array")
        if len(values) > 0:
            decimal = self.decimal
            unit = self._format_unit
            if unit is u.hour:
                unit = u.hourangle
            if self.format is None:
                # No explicit format: derive fields/precision from the spacing.
                if decimal:
                    # Here we assume the spacing can be arbitrary, so for example
                    # 1.000223 degrees, in which case we don't want to have a
                    # format that rounds to degrees. So we find the number of
                    # decimal places we get from representing the spacing as a
                    # string in the desired units. The easiest way to find
                    # the smallest number of decimal places required is to
                    # format the number as a decimal float and strip any zeros
                    # from the end. We do this rather than just trusting e.g.
                    # str() because str(15.) == 15.0. We format using 10 decimal
                    # places by default before stripping the zeros since this
                    # corresponds to a resolution of less than a microarcecond,
                    # which should be sufficient.
                    spacing = spacing.to_value(unit)
                    fields = 0
                    precision = len(f"{spacing:.10f}".replace('0', ' ').strip().split('.', 1)[1])
                else:
                    # Spacing in (unit/3600), i.e. arcseconds for degrees.
                    spacing = spacing.to_value(unit / 3600)
                    if spacing >= 3600:
                        fields = 1
                        precision = 0
                    elif spacing >= 60:
                        fields = 2
                        precision = 0
                    elif spacing >= 1:
                        fields = 3
                        precision = 0
                    else:
                        fields = 3
                        precision = -int(np.floor(np.log10(spacing)))
            else:
                fields = self._fields
                precision = self._precision
            is_latex = format == 'latex' or (format == 'auto' and rcParams['text.usetex'])
            if decimal:
                # At the moment, the Angle class doesn't have a consistent way
                # to always convert angles to strings in decimal form with
                # symbols for units (instead of e.g 3arcsec). So as a workaround
                # we take advantage of the fact that Angle.to_string converts
                # the unit to a string manually when decimal=False and the unit
                # is not strictly u.degree or u.hourangle
                if self.show_decimal_unit:
                    decimal = False
                    sep = 'fromunit'
                    if is_latex:
                        fmt = 'latex'
                    else:
                        if unit is u.hourangle:
                            fmt = 'unicode'
                        else:
                            fmt = None
                    unit = CUSTOM_UNITS.get(unit, unit)
                else:
                    sep = None
                    fmt = None
            elif self.sep is not None:
                sep = self.sep
                fmt = None
            else:
                sep = 'fromunit'
                if unit == u.degree:
                    if is_latex:
                        fmt = 'latex'
                    else:
                        sep = ('\xb0', "'", '"')
                        fmt = None
                else:
                    if format == 'ascii':
                        fmt = None
                    elif is_latex:
                        fmt = 'latex'
                    else:
                        # Here we still use LaTeX but this is for Matplotlib's
                        # LaTeX engine - we can't use fmt='latex' as this
                        # doesn't produce LaTeX output that respects the fonts.
                        sep = (r'$\mathregular{^h}$', r'$\mathregular{^m}$', r'$\mathregular{^s}$')
                        fmt = None
            angles = Angle(values)
            string = angles.to_string(unit=unit,
                                      precision=precision,
                                      decimal=decimal,
                                      fields=fields,
                                      sep=sep,
                                      format=fmt).tolist()
            return string
        else:
            return []
class ScalarFormatterLocator(BaseFormatterLocator):
    """
    A joint formatter/locator for plain scalar coordinates.

    Accepts either an 'x.xxx'-style format (precision-driven) or a C-style
    '%'-format string.
    """
    def __init__(self, values=None, number=None, spacing=None, format=None,
                 unit=None, format_unit=None):
        # Infer the unit from whichever input carries one.
        if unit is not None:
            unit = unit
            format_unit = format_unit or unit
        elif spacing is not None:
            unit = spacing.unit
            format_unit = format_unit or spacing.unit
        elif values is not None:
            unit = values.unit
            format_unit = format_unit or values.unit
        super().__init__(values=values, number=number, spacing=spacing,
                         format=format, unit=unit, format_unit=format_unit)

    @property
    def spacing(self):
        return self._spacing

    @spacing.setter
    def spacing(self, spacing):
        if spacing is not None and not isinstance(spacing, u.Quantity):
            raise TypeError("spacing should be an astropy.units.Quantity instance")
        self._number = None
        self._spacing = spacing
        self._values = None

    @property
    def format(self):
        return self._format

    @format.setter
    def format(self, value):
        self._format = value
        if value is None:
            return
        if SCAL_RE.match(value) is not None:
            # 'x.xxx' style: precision is the number of digits after the dot.
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
            # Clamp a previously-set spacing to the format's base spacing and
            # snap it to an integer multiple of it.
            if self.spacing is not None and self.spacing < self.base_spacing:
                warnings.warn("Spacing is too small - resetting spacing to match format")
                self.spacing = self.base_spacing
            if self.spacing is not None:
                ratio = (self.spacing / self.base_spacing).decompose().value
                remainder = ratio - np.round(ratio)
                if abs(remainder) > 1.e-10:
                    warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
                    self.spacing = self.base_spacing * max(1, round(ratio))
        elif not value.startswith('%'):
            # Anything else must be a C-style '%' format.
            raise ValueError(f"Invalid format: {value}")

    @property
    def base_spacing(self):
        # NOTE(review): only set after an 'x.xxx' format is assigned;
        # accessing this with a '%' format would raise AttributeError.
        return self._format_unit / (10. ** self._precision)

    def locator(self, value_min, value_max):
        """
        Return ``(tick_values, spacing)`` for [value_min, value_max], both as
        Quantities in the coordinate unit.
        """
        if self.values is not None:
            # values were manually specified
            return self.values, 1.1 * self._unit
        else:
            # In the special case where value_min is the same as value_max, we
            # don't locate any ticks. This can occur for example when taking a
            # slice for a cube (along the dimension sliced).
            if value_min == value_max:
                return [] * self._unit, 0 * self._unit
            if self.spacing is not None:
                # spacing was manually specified
                spacing = self.spacing.to_value(self._unit)
            elif self.number is not None:
                # number of ticks was specified, work out optimal spacing
                # first compute the exact spacing
                dv = abs(float(value_max - value_min)) / self.number * self._unit
                if self.format is not None and (not self.format.startswith('%')) and dv < self.base_spacing:
                    # if the spacing is less than the minimum spacing allowed by the format, simply
                    # use the format precision instead.
                    spacing = self.base_spacing.to_value(self._unit)
                else:
                    from .utils import select_step_scalar
                    spacing = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
            # We now find the interval values as multiples of the spacing and
            # generate the tick positions from this
            values = self._locate_values(value_min, value_max, spacing)
            return values * spacing * self._unit, spacing * self._unit

    def formatter(self, values, spacing, format='auto'):
        """
        Format tick ``values`` as strings; precision is inferred from
        ``spacing`` unless a format was explicitly set.
        """
        if len(values) > 0:
            if self.format is None:
                if spacing.value < 1.:
                    precision = -int(np.floor(np.log10(spacing.value)))
                else:
                    precision = 0
            elif self.format.startswith('%'):
                # C-style formatting operates on the raw .value.
                return [(self.format % x.value) for x in values]
            else:
                precision = self._precision
            return [("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit)) for x in values]
        else:
            return []
| {
"content_hash": "02e94870bc358e2df65d9af130542877",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 125,
"avg_line_length": 36.32749562171629,
"alnum_prop": 0.5153545774478138,
"repo_name": "dhomeier/astropy",
"id": "de95471cf5cb3d54c2d5b06e1cae26f015351178",
"size": "21245",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "astropy/visualization/wcsaxes/formatter_locator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10891881"
},
{
"name": "C++",
"bytes": "55147"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "181654"
},
{
"name": "M4",
"bytes": "18016"
},
{
"name": "Makefile",
"bytes": "51059"
},
{
"name": "Python",
"bytes": "10582251"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import sys
from nose.plugins.skip import SkipTest
sys.path[0:0] = [""]
import datetime
import unittest
import uuid
import math
import itertools
import re
try:
import dateutil
except ImportError:
dateutil = None
from decimal import Decimal
from bson import Binary, DBRef, ObjectId
from mongoengine import *
from mongoengine.connection import get_db
from mongoengine.base import _document_registry
from mongoengine.base.datastructures import BaseDict, EmbeddedDocumentList
from mongoengine.errors import NotRegistered
from mongoengine.python_support import PY3, b, bin_type
__all__ = ("FieldTest", "EmbeddedDocumentListFieldTestCase")
class FieldTest(unittest.TestCase):
def setUp(self):
    # Connect to the test database; get_db() exposes the underlying pymongo
    # database for raw collection access in individual tests.
    connect(db='mongoenginetest')
    self.db = get_db()
def tearDown(self):
    # Drop the GridFS collections — presumably written by file-field tests
    # elsewhere in this suite (not visible here).
    self.db.drop_collection('fs.files')
    self.db.drop_collection('fs.chunks')
def test_default_values_nothing_set(self):
    """Ensure that default field values are used when creating a document
    without setting those fields explicitly.
    """
    class Person(Document):
        name = StringField()
        age = IntField(default=30, required=False)
        userid = StringField(default=lambda: 'test', required=True)
        created = DateTimeField(default=datetime.datetime.utcnow)
    person = Person(name="Ross")
    # Confirm saving now would store values
    data_to_be_saved = sorted(person.to_mongo().keys())
    self.assertEqual(
        data_to_be_saved, ['age', 'created', 'name', 'userid'])
    self.assertTrue(person.validate() is None)
    # Attribute access must be stable and agree with the raw _data dict.
    self.assertEqual(person.name, person.name)
    self.assertEqual(person.age, person.age)
    self.assertEqual(person.userid, person.userid)
    self.assertEqual(person.created, person.created)
    self.assertEqual(person._data['name'], person.name)
    self.assertEqual(person._data['age'], person.age)
    self.assertEqual(person._data['userid'], person.userid)
    self.assertEqual(person._data['created'], person.created)
    # Confirm introspection changes nothing
    data_to_be_saved = sorted(person.to_mongo().keys())
    self.assertEqual(
        data_to_be_saved, ['age', 'created', 'name', 'userid'])
def test_default_values_set_to_None(self):
    """Ensure that default field values are used when fields are passed
    explicitly as None to the constructor.
    """
    class Person(Document):
        name = StringField()
        age = IntField(default=30, required=False)
        userid = StringField(default=lambda: 'test', required=True)
        created = DateTimeField(default=datetime.datetime.utcnow)
    # Trying setting values to None
    person = Person(name=None, age=None, userid=None, created=None)
    # Confirm saving now would store values ('name' has no default, so it is
    # absent from the document).
    data_to_be_saved = sorted(person.to_mongo().keys())
    self.assertEqual(data_to_be_saved, ['age', 'created', 'userid'])
    self.assertTrue(person.validate() is None)
    self.assertEqual(person.name, person.name)
    self.assertEqual(person.age, person.age)
    self.assertEqual(person.userid, person.userid)
    self.assertEqual(person.created, person.created)
    self.assertEqual(person._data['name'], person.name)
    self.assertEqual(person._data['age'], person.age)
    self.assertEqual(person._data['userid'], person.userid)
    self.assertEqual(person._data['created'], person.created)
    # Confirm introspection changes nothing
    data_to_be_saved = sorted(person.to_mongo().keys())
    self.assertEqual(data_to_be_saved, ['age', 'created', 'userid'])
def test_default_values_when_setting_to_None(self):
    """Ensure that default field values are restored when fields are set
    to None after construction.
    """
    class Person(Document):
        name = StringField()
        age = IntField(default=30, required=False)
        userid = StringField(default=lambda: 'test', required=True)
        created = DateTimeField(default=datetime.datetime.utcnow)
    person = Person()
    person.name = None
    person.age = None
    person.userid = None
    person.created = None
    # Confirm saving now would store values
    data_to_be_saved = sorted(person.to_mongo().keys())
    self.assertEqual(data_to_be_saved, ['age', 'created', 'userid'])
    self.assertTrue(person.validate() is None)
    self.assertEqual(person.name, person.name)
    self.assertEqual(person.age, person.age)
    self.assertEqual(person.userid, person.userid)
    self.assertEqual(person.created, person.created)
    self.assertEqual(person._data['name'], person.name)
    self.assertEqual(person._data['age'], person.age)
    self.assertEqual(person._data['userid'], person.userid)
    self.assertEqual(person._data['created'], person.created)
    # Confirm introspection changes nothing
    data_to_be_saved = sorted(person.to_mongo().keys())
    self.assertEqual(data_to_be_saved, ['age', 'created', 'userid'])
def test_default_values_when_deleting_value(self):
    """Ensure that default field values are restored when a field's value
    is deleted.
    """
    class Person(Document):
        name = StringField()
        age = IntField(default=30, required=False)
        userid = StringField(default=lambda: 'test', required=True)
        created = DateTimeField(default=datetime.datetime.utcnow)
    person = Person(name="Ross")
    del person.name
    del person.age
    del person.userid
    del person.created
    data_to_be_saved = sorted(person.to_mongo().keys())
    self.assertEqual(data_to_be_saved, ['age', 'created', 'userid'])
    self.assertTrue(person.validate() is None)
    self.assertEqual(person.name, person.name)
    self.assertEqual(person.age, person.age)
    self.assertEqual(person.userid, person.userid)
    self.assertEqual(person.created, person.created)
    self.assertEqual(person._data['name'], person.name)
    self.assertEqual(person._data['age'], person.age)
    self.assertEqual(person._data['userid'], person.userid)
    self.assertEqual(person._data['created'], person.created)
    # Confirm introspection changes nothing
    data_to_be_saved = sorted(person.to_mongo().keys())
    self.assertEqual(data_to_be_saved, ['age', 'created', 'userid'])
def test_required_values(self):
    """Ensure that required field constraints are enforced.
    """
    class Person(Document):
        name = StringField(required=True)
        age = IntField(required=True)
        userid = StringField()
    # Each document is missing one of the two required fields, so
    # validation must fail either way.
    for partial_kwargs in ({"name": "Test User"}, {"age": 30}):
        incomplete = Person(**partial_kwargs)
        self.assertRaises(ValidationError, incomplete.validate)
def test_not_required_handles_none_in_update(self):
    """Ensure that every field should accept None via update() when
    required is False.
    """
    class HandleNoneFields(Document):
        str_fld = StringField()
        int_fld = IntField()
        flt_fld = FloatField()
        comp_dt_fld = ComplexDateTimeField()
    HandleNoneFields.drop_collection()
    doc = HandleNoneFields()
    doc.str_fld = u'spam ham egg'
    doc.int_fld = 42
    doc.flt_fld = 4.2
    # BUG FIX: the original assigned ``doc.com_dt_fld`` (missing 'p'), a
    # non-field attribute, so the ComplexDateTimeField was never populated
    # before the update below unset it.
    doc.comp_dt_fld = datetime.datetime.utcnow()
    doc.save()
    res = HandleNoneFields.objects(id=doc.id).update(
        set__str_fld=None,
        set__int_fld=None,
        set__flt_fld=None,
        set__comp_dt_fld=None,
    )
    self.assertEqual(res, 1)
    # Retrieve data from db and verify it.
    ret = HandleNoneFields.objects.all()[0]
    self.assertEqual(ret.str_fld, None)
    self.assertEqual(ret.int_fld, None)
    self.assertEqual(ret.flt_fld, None)
    # Return current time if retrieved value is None.
    self.assertTrue(isinstance(ret.comp_dt_fld, datetime.datetime))
def test_not_required_handles_none_from_database(self):
    """Ensure that every field can handle null values coming back from
    the database.
    """
    class HandleNoneFields(Document):
        str_fld = StringField(required=True)
        int_fld = IntField(required=True)
        flt_fld = FloatField(required=True)
        comp_dt_fld = ComplexDateTimeField(required=True)
    HandleNoneFields.drop_collection()
    doc = HandleNoneFields()
    doc.str_fld = u'spam ham egg'
    doc.int_fld = 42
    doc.flt_fld = 4.2
    # BUG FIX: the original assigned ``doc.com_dt_fld`` (missing 'p'), a
    # non-field attribute, leaving the required ComplexDateTimeField unset
    # before save().
    doc.comp_dt_fld = datetime.datetime.utcnow()
    doc.save()
    # Bypass mongoengine and null out the fields directly in the database.
    collection = self.db[HandleNoneFields._get_collection_name()]
    obj = collection.update({"_id": doc.id}, {"$unset": {
        "str_fld": 1,
        "int_fld": 1,
        "flt_fld": 1,
        "comp_dt_fld": 1}
        })
    # Retrieve data from db and verify it.
    ret = HandleNoneFields.objects.all()[0]
    self.assertEqual(ret.str_fld, None)
    self.assertEqual(ret.int_fld, None)
    self.assertEqual(ret.flt_fld, None)
    # Return current time if retrieved value is None.
    self.assertTrue(isinstance(ret.comp_dt_fld, datetime.datetime))
    self.assertRaises(ValidationError, ret.validate)
def test_int_and_float_ne_operator(self):
    """__ne=None must match only documents whose numeric fields are set."""
    class TestDocument(Document):
        int_fld = IntField()
        float_fld = FloatField()
    TestDocument.drop_collection()
    # One document with both fields null, one with both set.
    for field_values in ({"int_fld": None, "float_fld": None},
                         {"int_fld": 1, "float_fld": 1}):
        TestDocument(**field_values).save()
    for ne_query in ("int_fld__ne", "float_fld__ne"):
        self.assertEqual(1, TestDocument.objects(**{ne_query: None}).count())
def test_long_ne_operator(self):
    """__ne=None must match only documents whose LongField is set."""
    class TestDocument(Document):
        long_fld = LongField()
    TestDocument.drop_collection()
    for stored in (None, 1):
        TestDocument(long_fld=stored).save()
    self.assertEqual(1, TestDocument.objects(long_fld__ne=None).count())
def test_object_id_validation(self):
    """Ensure that invalid values cannot be assigned to the implicit
    ObjectId primary key.
    """
    class Person(Document):
        name = StringField()
    person = Person(name='Test User')
    # No id until the document is saved.
    self.assertEqual(person.id, None)
    # Neither an int nor a short string is a valid ObjectId.
    person.id = 47
    self.assertRaises(ValidationError, person.validate)
    person.id = 'abc'
    self.assertRaises(ValidationError, person.validate)
    # A 24-character hex string is accepted.
    person.id = '497ce96f395f2f052a494fd4'
    person.validate()
def test_string_validation(self):
    """Ensure that invalid values cannot be assigned to string fields.
    """
    class Person(Document):
        name = StringField(max_length=20)
        # Positional argument is the field's validation regex.
        userid = StringField(r'[0-9a-z_]+$')
    # A non-string value is rejected.
    person = Person(name=34)
    self.assertRaises(ValidationError, person.validate)
    # Test regex validation on userid
    person = Person(userid='test.User')
    self.assertRaises(ValidationError, person.validate)
    person.userid = 'test_user'
    self.assertEqual(person.userid, 'test_user')
    person.validate()
    # Test max length validation on name
    person = Person(name='Name that is more than twenty characters')
    self.assertRaises(ValidationError, person.validate)
    person.name = 'Shorter name'
    person.validate()
def test_url_validation(self):
    """Ensure that URLFields validate urls properly.
    """
    class Link(Document):
        url = URLField()
    # A bare word is not a valid URL...
    bad_link = Link()
    bad_link.url = 'google'
    self.assertRaises(ValidationError, bad_link.validate)
    # ...but a scheme://host:port form is accepted.
    good_link = Link()
    good_link.url = 'http://www.google.com:8080'
    good_link.validate()
def test_int_validation(self):
    """Ensure that invalid values cannot be assigned to int fields.
    """
    class Person(Document):
        age = IntField(min_value=0, max_value=110)
    person = Person()
    # An in-range value validates cleanly.
    person.age = 50
    person.validate()
    # Below the minimum, above the maximum, and non-numeric all fail.
    for bad_age in (-1, 120, 'ten'):
        person.age = bad_age
        self.assertRaises(ValidationError, person.validate)
def test_long_validation(self):
    """Ensure that invalid values cannot be assigned to long fields.
    """
    class TestDocument(Document):
        value = LongField(min_value=0, max_value=110)
    doc = TestDocument()
    doc.value = 50
    doc.validate()
    doc.value = -1
    self.assertRaises(ValidationError, doc.validate)
    # BUG FIX: the original assigned ``doc.age`` (a stray attribute copied
    # from test_int_validation), so the max_value and type checks below
    # only passed because ``value`` was still -1.
    doc.value = 120
    self.assertRaises(ValidationError, doc.validate)
    doc.value = 'ten'
    self.assertRaises(ValidationError, doc.validate)
def test_float_validation(self):
    """Ensure that invalid values cannot be assigned to float fields.
    """
    class Person(Document):
        height = FloatField(min_value=0.1, max_value=3.5)
    person = Person()
    person.height = 1.89
    person.validate()
    # A numeric string is not accepted by FloatField.
    person.height = '2.0'
    self.assertRaises(ValidationError, person.validate)
    # Out-of-range values fail.
    person.height = 0.01
    self.assertRaises(ValidationError, person.validate)
    person.height = 4.0
    self.assertRaises(ValidationError, person.validate)
    # Invalid values passed to the constructor also fail at validate time.
    person_2 = Person(height='something invalid')
    self.assertRaises(ValidationError, person_2.validate)
def test_decimal_validation(self):
    """Ensure that invalid values cannot be assigned to decimal fields.
    """
    class Person(Document):
        height = DecimalField(min_value=Decimal('0.1'),
                              max_value=Decimal('3.5'))
    Person.drop_collection()
    Person(height=Decimal('1.89')).save()
    person = Person.objects.first()
    self.assertEqual(person.height, Decimal('1.89'))
    # NOTE(review): unlike FloatField, a numeric string appears to be
    # accepted here (saved without validation error) — confirm DecimalField
    # coerces strings.
    person.height = '2.0'
    person.save()
    # Out-of-range values fail, whether given as float or Decimal.
    person.height = 0.01
    self.assertRaises(ValidationError, person.validate)
    person.height = Decimal('0.01')
    self.assertRaises(ValidationError, person.validate)
    person.height = Decimal('4.0')
    self.assertRaises(ValidationError, person.validate)
    person.height = 'something invalid'
    self.assertRaises(ValidationError, person.validate)
    person_2 = Person(height='something invalid')
    self.assertRaises(ValidationError, person_2.validate)
    Person.drop_collection()
def test_decimal_comparison(self):
    """Comparison queries on DecimalField accept Decimal, int and str."""
    class Person(Document):
        money = DecimalField()
    Person.drop_collection()
    for amount in (6, 8, 10):
        Person(money=amount).save()
    # The same __gt query must behave identically for all three types.
    for threshold in (Decimal("7"), 7, "7"):
        self.assertEqual(2, Person.objects(money__gt=threshold).count())
def test_decimal_storage(self):
    """DecimalField with precision=4 rounds on write and pads on read."""
    class Person(Document):
        btc = DecimalField(precision=4)
    Person.drop_collection()
    Person(btc=10).save()
    Person(btc=10.1).save()
    Person(btc=10.11).save()
    Person(btc="10.111").save()
    Person(btc=Decimal("10.1111")).save()
    Person(btc=Decimal("10.11111")).save()
    # How its stored: floats truncated/rounded to 4 decimal places.
    expected = [{'btc': 10.0}, {'btc': 10.1}, {'btc': 10.11},
                {'btc': 10.111}, {'btc': 10.1111}, {'btc': 10.1111}]
    actual = list(Person.objects.exclude('id').as_pymongo())
    self.assertEqual(expected, actual)
    # How it comes out locally: Decimals padded to exactly 4 places.
    expected = [Decimal('10.0000'), Decimal('10.1000'), Decimal('10.1100'),
                Decimal('10.1110'), Decimal('10.1111'), Decimal('10.1111')]
    actual = list(Person.objects().scalar('btc'))
    self.assertEqual(expected, actual)
def test_boolean_validation(self):
    """Ensure that invalid values cannot be assigned to boolean fields.
    """
    class Person(Document):
        admin = BooleanField()
    person = Person()
    person.admin = True
    person.validate()
    # Truthy non-bool values (int, str) are rejected.
    for non_boolean in (2, 'Yes'):
        person.admin = non_boolean
        self.assertRaises(ValidationError, person.validate)
def test_uuid_field_string(self):
    """Test UUID fields storing as String
    """
    class Person(Document):
        api_key = UUIDField(binary=False)
    Person.drop_collection()
    # Round-trip: a stored UUID is queryable and comes back equal.
    uu = uuid.uuid4()
    Person(api_key=uu).save()
    self.assertEqual(1, Person.objects(api_key=uu).count())
    self.assertEqual(uu, Person.objects.first().api_key)
    person = Person()
    valid = (uuid.uuid4(), uuid.uuid1())
    for api_key in valid:
        person.api_key = api_key
        person.validate()
    # Malformed strings: invalid hex digit 'g', and one digit short.
    invalid = ('9d159858-549b-4975-9f98-dd2f987c113g',
               '9d159858-549b-4975-9f98-dd2f987c113')
    for api_key in invalid:
        person.api_key = api_key
        self.assertRaises(ValidationError, person.validate)
def test_uuid_field_binary(self):
    """Test UUID fields storing as Binary object
    """
    class Person(Document):
        api_key = UUIDField(binary=True)
    Person.drop_collection()
    # Same round-trip and validation checks as the string-backed variant.
    uu = uuid.uuid4()
    Person(api_key=uu).save()
    self.assertEqual(1, Person.objects(api_key=uu).count())
    self.assertEqual(uu, Person.objects.first().api_key)
    person = Person()
    valid = (uuid.uuid4(), uuid.uuid1())
    for api_key in valid:
        person.api_key = api_key
        person.validate()
    # Malformed strings: invalid hex digit 'g', and one digit short.
    invalid = ('9d159858-549b-4975-9f98-dd2f987c113g',
               '9d159858-549b-4975-9f98-dd2f987c113')
    for api_key in invalid:
        person.api_key = api_key
        self.assertRaises(ValidationError, person.validate)
def test_datetime_validation(self):
    """Ensure that invalid values cannot be assigned to datetime fields.
    """
    class LogEntry(Document):
        time = DateTimeField()
    log = LogEntry()
    # datetime, date, and space-separated ISO strings are all accepted.
    log.time = datetime.datetime.now()
    log.validate()
    log.time = datetime.date.today()
    log.validate()
    log.time = datetime.datetime.now().isoformat(' ')
    log.validate()
    if dateutil:
        # 'T'-separated ISO strings only parse when dateutil is installed.
        log.time = datetime.datetime.now().isoformat('T')
        log.validate()
    # Non-date values are rejected.
    log.time = -1
    self.assertRaises(ValidationError, log.validate)
    log.time = 'ABC'
    self.assertRaises(ValidationError, log.validate)
def test_datetime_tz_aware_mark_as_changed(self):
    """Re-assigning an equal naive datetime on a tz_aware connection must
    still mark the field as changed (tz-aware vs naive are not equal)."""
    from mongoengine import connection
    # Reset the connections so a fresh tz_aware connection can be made.
    connection._connection_settings = {}
    connection._connections = {}
    connection._dbs = {}
    connect(db='mongoenginetest', tz_aware=True)
    class LogEntry(Document):
        time = DateTimeField()
    LogEntry.drop_collection()
    LogEntry(time=datetime.datetime(2013, 1, 1, 0, 0, 0)).save()
    log = LogEntry.objects.first()
    log.time = datetime.datetime(2013, 1, 1, 0, 0, 0)
    self.assertEqual(['time'], log._changed_fields)
def test_datetime(self):
    """Tests showing pymongo datetime fields handling of microseconds.
    Microseconds are rounded to the nearest millisecond and pre UTC
    handling is wonky.
    See: http://api.mongodb.org/python/current/api/bson/son.html#dt
    """
    # NOTE: this file is Python 2 — zero-padded integer literals like 01
    # are octal there (and a syntax error under Python 3).
    class LogEntry(Document):
        date = DateTimeField()
    LogEntry.drop_collection()
    # Test can save dates
    log = LogEntry()
    log.date = datetime.date.today()
    log.save()
    log.reload()
    self.assertEqual(log.date.date(), datetime.date.today())
    LogEntry.drop_collection()
    # Post UTC - microseconds are rounded (down) nearest millisecond and
    # dropped
    d1 = datetime.datetime(1970, 01, 01, 00, 00, 01, 999)
    d2 = datetime.datetime(1970, 01, 01, 00, 00, 01)
    log = LogEntry()
    log.date = d1
    log.save()
    log.reload()
    self.assertNotEqual(log.date, d1)
    self.assertEqual(log.date, d2)
    # Post UTC - microseconds are rounded (down) nearest millisecond
    d1 = datetime.datetime(1970, 01, 01, 00, 00, 01, 9999)
    d2 = datetime.datetime(1970, 01, 01, 00, 00, 01, 9000)
    log.date = d1
    log.save()
    log.reload()
    self.assertNotEqual(log.date, d1)
    self.assertEqual(log.date, d2)
    if not PY3:
        # Pre UTC dates microseconds below 1000 are dropped
        # This does not seem to be true in PY3
        d1 = datetime.datetime(1969, 12, 31, 23, 59, 59, 999)
        d2 = datetime.datetime(1969, 12, 31, 23, 59, 59)
        log.date = d1
        log.save()
        log.reload()
        self.assertNotEqual(log.date, d1)
        self.assertEqual(log.date, d2)
    LogEntry.drop_collection()
def test_datetime_usage(self):
"""Tests for regular datetime fields"""
class LogEntry(Document):
date = DateTimeField()
LogEntry.drop_collection()
d1 = datetime.datetime(1970, 01, 01, 00, 00, 01)
log = LogEntry()
log.date = d1
log.validate()
log.save()
for query in (d1, d1.isoformat(' ')):
log1 = LogEntry.objects.get(date=query)
self.assertEqual(log, log1)
if dateutil:
log1 = LogEntry.objects.get(date=d1.isoformat('T'))
self.assertEqual(log, log1)
LogEntry.drop_collection()
# create 60 log entries
for i in xrange(1950, 2010):
d = datetime.datetime(i, 01, 01, 00, 00, 01)
LogEntry(date=d).save()
self.assertEqual(LogEntry.objects.count(), 60)
# Test ordering
logs = LogEntry.objects.order_by("date")
count = logs.count()
i = 0
while i == count - 1:
self.assertTrue(logs[i].date <= logs[i + 1].date)
i += 1
logs = LogEntry.objects.order_by("-date")
count = logs.count()
i = 0
while i == count - 1:
self.assertTrue(logs[i].date >= logs[i + 1].date)
i += 1
# Test searching
logs = LogEntry.objects.filter(date__gte=datetime.datetime(1980, 1, 1))
self.assertEqual(logs.count(), 30)
logs = LogEntry.objects.filter(date__lte=datetime.datetime(1980, 1, 1))
self.assertEqual(logs.count(), 30)
logs = LogEntry.objects.filter(
date__lte=datetime.datetime(2011, 1, 1),
date__gte=datetime.datetime(2000, 1, 1),
)
self.assertEqual(logs.count(), 10)
LogEntry.drop_collection()
def test_complexdatetime_storage(self):
    """Tests for complex datetime fields - which can handle microseconds
    without rounding (a plain DateTimeField would round or drop them).
    """
    class LogEntry(Document):
        date = ComplexDateTimeField()
        date_with_dots = ComplexDateTimeField(separator='.')

    LogEntry.drop_collection()

    # Post UTC - sub-millisecond microseconds survive a round trip
    # (default datetime fields would drop them).
    d1 = datetime.datetime(1970, 1, 1, 0, 0, 1, 999)
    log = LogEntry()
    log.date = d1
    log.save()
    log.reload()
    self.assertEqual(log.date, d1)

    # Post UTC - values a default field would round to the nearest
    # millisecond are kept exact.
    d1 = datetime.datetime(1970, 1, 1, 0, 0, 1, 9999)
    log.date = d1
    log.save()
    log.reload()
    self.assertEqual(log.date, d1)

    # Pre UTC dates with microseconds below 1000 are kept exact too.
    d1 = datetime.datetime(1969, 12, 31, 23, 59, 59, 999)
    log.date = d1
    log.save()
    log.reload()
    self.assertEqual(log.date, d1)

    # Pre UTC microseconds above 1000 are wonky with default datetime
    # fields; here both storage and equality lookups stay exact.
    for i in xrange(1001, 3113, 33):
        d1 = datetime.datetime(1969, 12, 31, 23, 59, 59, i)
        log.date = d1
        log.save()
        log.reload()
        self.assertEqual(log.date, d1)
        log1 = LogEntry.objects.get(date=d1)
        self.assertEqual(log, log1)

    # Test string padding: every component is zero-padded to fixed width.
    # BUGFIX: patterns are now raw strings (the originals relied on
    # Python passing unknown escapes like '\d' through unchanged).
    microsecond = map(int, [math.pow(10, x) for x in xrange(6)])
    mm = dd = hh = ii = ss = [1, 10]

    for values in itertools.product([2014], mm, dd, hh, ii, ss, microsecond):
        stored = LogEntry(date=datetime.datetime(*values)).to_mongo()['date']
        self.assertTrue(re.match(r'^\d{4},\d{2},\d{2},\d{2},\d{2},\d{2},\d{6}$', stored) is not None)

    # Test separator.  BUGFIX: the dots are now escaped - the original
    # unescaped '.' matched any character, so it could not detect a
    # wrong separator.
    stored = LogEntry(date_with_dots=datetime.datetime(2014, 1, 1)).to_mongo()['date_with_dots']
    self.assertTrue(re.match(r'^\d{4}\.\d{2}\.\d{2}\.\d{2}\.\d{2}\.\d{2}\.\d{6}$', stored) is not None)

    LogEntry.drop_collection()
def test_complexdatetime_usage(self):
    """Tests for complex datetime fields - which can handle microseconds
    without rounding.
    """
    class LogEntry(Document):
        date = ComplexDateTimeField()

    LogEntry.drop_collection()

    d1 = datetime.datetime(1970, 1, 1, 0, 0, 1, 999)
    log = LogEntry()
    log.date = d1
    log.save()

    # Microsecond-precise equality lookup.
    log1 = LogEntry.objects.get(date=d1)
    self.assertEqual(log, log1)

    LogEntry.drop_collection()

    # create 60 log entries, one per year
    for i in xrange(1950, 2010):
        d = datetime.datetime(i, 1, 1, 0, 0, 1, 999)
        LogEntry(date=d).save()

    self.assertEqual(LogEntry.objects.count(), 60)

    # Test ordering.
    # BUGFIX: the original loops used `while i == count - 1`, which is
    # False for i == 0 whenever count > 1, so the ordering assertions
    # never ran.  `<` actually walks the result set.
    logs = LogEntry.objects.order_by("date")
    count = logs.count()
    i = 0
    while i < count - 1:
        self.assertTrue(logs[i].date <= logs[i + 1].date)
        i += 1

    logs = LogEntry.objects.order_by("-date")
    count = logs.count()
    i = 0
    while i < count - 1:
        self.assertTrue(logs[i].date >= logs[i + 1].date)
        i += 1

    # Test range filtering.
    logs = LogEntry.objects.filter(date__gte=datetime.datetime(1980, 1, 1))
    self.assertEqual(logs.count(), 30)

    logs = LogEntry.objects.filter(date__lte=datetime.datetime(1980, 1, 1))
    self.assertEqual(logs.count(), 30)

    logs = LogEntry.objects.filter(
        date__lte=datetime.datetime(2011, 1, 1),
        date__gte=datetime.datetime(2000, 1, 1),
    )
    self.assertEqual(logs.count(), 10)
    LogEntry.drop_collection()

    # Test microsecond-level ordering/filtering
    for microsecond in (99, 999, 9999, 10000):
        LogEntry(date=datetime.datetime(2015, 1, 1, 0, 0, 0, microsecond)).save()

    logs = list(LogEntry.objects.order_by('date'))
    for next_idx, log in enumerate(logs[:-1], start=1):
        next_log = logs[next_idx]
        self.assertTrue(log.date < next_log.date)

    logs = list(LogEntry.objects.order_by('-date'))
    for next_idx, log in enumerate(logs[:-1], start=1):
        next_log = logs[next_idx]
        self.assertTrue(log.date > next_log.date)

    logs = LogEntry.objects.filter(date__lte=datetime.datetime(2015, 1, 1, 0, 0, 0, 10000))
    self.assertEqual(logs.count(), 4)
    LogEntry.drop_collection()
def test_list_validation(self):
    """Ensure that a list field only accepts lists with valid elements.
    """
    class User(Document):
        pass

    class Comment(EmbeddedDocument):
        content = StringField()

    class BlogPost(Document):
        content = StringField()
        comments = ListField(EmbeddedDocumentField(Comment))
        tags = ListField(StringField())
        authors = ListField(ReferenceField(User))
        generic = ListField(GenericReferenceField())

    post = BlogPost(content='Went for a walk today...')
    post.validate()

    # A bare string or non-string elements are rejected for a
    # ListField(StringField()).
    post.tags = 'fun'
    self.assertRaises(ValidationError, post.validate)
    post.tags = [1, 2]
    self.assertRaises(ValidationError, post.validate)

    # Lists and tuples of strings are both accepted.
    post.tags = ['fun', 'leisure']
    post.validate()
    post.tags = ('fun', 'leisure')
    post.validate()

    # Embedded-document lists only accept instances of the declared type.
    post.comments = ['a']
    self.assertRaises(ValidationError, post.validate)
    post.comments = 'yay'
    self.assertRaises(ValidationError, post.validate)
    comments = [Comment(content='Good for you'), Comment(content='Yay.')]
    post.comments = comments
    post.validate()

    # Reference lists reject wrong types and unsaved documents;
    # a saved User passes.
    post.authors = [Comment()]
    self.assertRaises(ValidationError, post.validate)
    post.authors = [User()]
    self.assertRaises(ValidationError, post.validate)
    user = User()
    user.save()
    post.authors = [user]
    post.validate()

    # Generic references likewise reject ints, unsaved and embedded docs.
    post.generic = [1, 2]
    self.assertRaises(ValidationError, post.validate)
    post.generic = [User(), Comment()]
    self.assertRaises(ValidationError, post.validate)
    post.generic = [Comment()]
    self.assertRaises(ValidationError, post.validate)
    post.generic = [user]
    post.validate()

    User.drop_collection()
    BlogPost.drop_collection()
def test_sorted_list_sorting(self):
    """Ensure that a sorted list field properly sorts values.
    """
    class Comment(EmbeddedDocument):
        order = IntField()
        content = StringField()

    class BlogPost(Document):
        content = StringField()
        comments = SortedListField(EmbeddedDocumentField(Comment),
                                   ordering='order')
        tags = SortedListField(StringField())

    post = BlogPost(content='Went for a walk today...')
    post.save()

    # Without an `ordering` key the element values themselves are sorted.
    post.tags = ['leisure', 'fun']
    post.save()
    post.reload()
    self.assertEqual(post.tags, ['fun', 'leisure'])

    # With ordering='order' the embedded documents are sorted by that field.
    comment1 = Comment(content='Good for you', order=1)
    comment2 = Comment(content='Yay.', order=0)
    comments = [comment1, comment2]
    post.comments = comments
    post.save()
    post.reload()
    self.assertEqual(post.comments[0].content, comment2.content)
    self.assertEqual(post.comments[1].content, comment1.content)

    # Mutating the ordering key re-sorts the list on the next save.
    post.comments[0].order = 2
    post.save()
    post.reload()
    self.assertEqual(post.comments[0].content, comment1.content)
    self.assertEqual(post.comments[1].content, comment2.content)

    BlogPost.drop_collection()
def test_reverse_list_sorting(self):
    '''Ensure that a reverse sorted list field properly sorts values'''
    class Category(EmbeddedDocument):
        count = IntField()
        name = StringField()

    class CategoryList(Document):
        # reverse=True sorts by `count` in descending order on save.
        categories = SortedListField(EmbeddedDocumentField(Category),
                                     ordering='count', reverse=True)
        name = StringField()

    catlist = CategoryList(name="Top categories")
    cat1 = Category(name='posts', count=10)
    cat2 = Category(name='food', count=100)
    cat3 = Category(name='drink', count=40)
    catlist.categories = [cat1, cat2, cat3]
    catlist.save()
    catlist.reload()

    # Highest count first: food (100), drink (40), posts (10).
    self.assertEqual(catlist.categories[0].name, cat2.name)
    self.assertEqual(catlist.categories[1].name, cat3.name)
    self.assertEqual(catlist.categories[2].name, cat1.name)

    CategoryList.drop_collection()
def test_list_field(self):
    """Ensure that list types work as expected.
    """
    class BlogPost(Document):
        info = ListField()

    BlogPost.drop_collection()

    # Non-list values are rejected on validation.
    post = BlogPost()
    post.info = 'my post'
    self.assertRaises(ValidationError, post.validate)

    post.info = {'title': 'test'}
    self.assertRaises(ValidationError, post.validate)

    # An untyped ListField accepts heterogeneous element types.
    post.info = ['test']
    post.save()

    post = BlogPost()
    post.info = [{'test': 'test'}]
    post.save()

    post = BlogPost()
    post.info = [{'test': 3}]
    post.save()

    self.assertEqual(BlogPost.objects.count(), 3)
    # Element membership and positional (info__0) queries.
    self.assertEqual(
        BlogPost.objects.filter(info__exact='test').count(), 1)
    self.assertEqual(
        BlogPost.objects.filter(info__0__test='test').count(), 1)

    # Confirm handles non strings or non existing keys
    self.assertEqual(
        BlogPost.objects.filter(info__0__test__exact='5').count(), 0)
    self.assertEqual(
        BlogPost.objects.filter(info__100__test__exact='test').count(), 0)

    # In-place += and *= on the tracked list are persisted on save.
    post = BlogPost()
    post.info = ['1', '2']
    post.save()
    post = BlogPost.objects(info=['1', '2']).get()
    post.info += ['3', '4']
    post.save()
    self.assertEqual(BlogPost.objects(info=['1', '2', '3', '4']).count(), 1)
    post = BlogPost.objects(info=['1', '2', '3', '4']).get()
    post.info *= 2
    post.save()
    self.assertEqual(BlogPost.objects(info=['1', '2', '3', '4', '1', '2', '3', '4']).count(), 1)
    BlogPost.drop_collection()
def test_list_field_passed_in_value(self):
    """A list passed to the constructor stays usable: appending a saved
    reference works and the list repr shows the referenced documents."""
    class Foo(Document):
        bars = ListField(ReferenceField("Bar"))

    class Bar(Document):
        text = StringField()

    bar = Bar(text="hi")
    bar.save()

    foo = Foo(bars=[])
    foo.bars.append(bar)
    self.assertEqual(repr(foo.bars), '[<Bar: Bar object>]')
def test_list_field_strict(self):
    """Ensure that list field handles validation if provided a strict field type."""
    class Simple(Document):
        mapping = ListField(field=IntField())

    Simple.drop_collection()

    e = Simple()
    e.mapping = [1]
    e.save()

    # A non-int element must fail validation on save.
    def create_invalid_mapping():
        e.mapping = ["abc"]
        e.save()

    self.assertRaises(ValidationError, create_invalid_mapping)

    Simple.drop_collection()
def test_list_field_rejects_strings(self):
    """Strings aren't valid list field data types"""
    class Simple(Document):
        mapping = ListField()

    Simple.drop_collection()

    doc = Simple()
    # Assigning a scalar string where a list is expected must be caught
    # by the validation that save() performs.
    doc.mapping = 'hello world'
    self.assertRaises(ValidationError, doc.save)
def test_complex_field_required(self):
    """Ensure required cant be None / Empty"""
    # An empty list does not satisfy required=True on a ListField.
    class Simple(Document):
        mapping = ListField(required=True)

    Simple.drop_collection()
    e = Simple()
    e.mapping = []

    self.assertRaises(ValidationError, e.save)

    # Same for an empty dict on a required DictField
    # (Simple is deliberately redefined with a different schema).
    class Simple(Document):
        mapping = DictField(required=True)

    Simple.drop_collection()
    e = Simple()
    e.mapping = {}

    self.assertRaises(ValidationError, e.save)
def test_complex_field_same_value_not_changed(self):
    """
    If a complex field is set to the same value, it should not be marked as
    changed.
    """
    class Simple(Document):
        mapping = ListField()

    Simple.drop_collection()
    e = Simple().save()
    # Re-assigning the default empty list must leave the change
    # tracker empty.
    e.mapping = []
    self.assertEqual([], e._changed_fields)

    # Same check for DictField (Simple deliberately redefined).
    class Simple(Document):
        mapping = DictField()

    Simple.drop_collection()
    e = Simple().save()
    e.mapping = {}
    self.assertEqual([], e._changed_fields)
def test_slice_marks_field_as_changed(self):
    """Slice assignment on a tracked list marks the field as changed
    and the mutation is persisted on save."""
    class Simple(Document):
        widgets = ListField()

    simple = Simple(widgets=[1, 2, 3, 4]).save()
    simple.widgets[:3] = []
    self.assertEqual(['widgets'], simple._changed_fields)
    simple.save()

    simple = simple.reload()
    self.assertEqual(simple.widgets, [4])
def test_del_slice_marks_field_as_changed(self):
    """`del list[slice]` on a tracked list marks the field as changed
    and the mutation is persisted on save."""
    class Simple(Document):
        widgets = ListField()

    simple = Simple(widgets=[1, 2, 3, 4]).save()
    del simple.widgets[:3]
    self.assertEqual(['widgets'], simple._changed_fields)
    simple.save()

    simple = simple.reload()
    self.assertEqual(simple.widgets, [4])
def test_list_field_complex(self):
    """Ensure that the list fields can handle the complex types."""
    class SettingBase(EmbeddedDocument):
        meta = {'allow_inheritance': True}

    class StringSetting(SettingBase):
        value = StringField()

    class IntegerSetting(SettingBase):
        value = IntField()

    class Simple(Document):
        mapping = ListField()

    Simple.drop_collection()

    # Mix embedded docs, plain values, and a nested dict containing
    # further embedded docs and lists.
    e = Simple()
    e.mapping.append(StringSetting(value='foo'))
    e.mapping.append(IntegerSetting(value=42))
    e.mapping.append({'number': 1, 'string': 'Hi!', 'float': 1.001,
                      'complex': IntegerSetting(value=42),
                      'list': [IntegerSetting(value=42),
                               StringSetting(value='foo')]})
    e.save()

    # Embedded docs come back as their concrete subclasses.
    e2 = Simple.objects.get(id=e.id)
    self.assertTrue(isinstance(e2.mapping[0], StringSetting))
    self.assertTrue(isinstance(e2.mapping[1], IntegerSetting))

    # Test querying by position and nested keys
    self.assertEqual(
        Simple.objects.filter(mapping__1__value=42).count(), 1)
    self.assertEqual(
        Simple.objects.filter(mapping__2__number=1).count(), 1)
    self.assertEqual(
        Simple.objects.filter(mapping__2__complex__value=42).count(), 1)
    self.assertEqual(
        Simple.objects.filter(mapping__2__list__0__value=42).count(), 1)
    self.assertEqual(
        Simple.objects.filter(mapping__2__list__1__value='foo').count(), 1)

    # Confirm can update individual positions, even deeply nested ones
    Simple.objects().update(set__mapping__1=IntegerSetting(value=10))
    self.assertEqual(
        Simple.objects.filter(mapping__1__value=10).count(), 1)

    Simple.objects().update(
        set__mapping__2__list__1=StringSetting(value='Boo'))
    self.assertEqual(
        Simple.objects.filter(mapping__2__list__1__value='foo').count(), 0)
    self.assertEqual(
        Simple.objects.filter(mapping__2__list__1__value='Boo').count(), 1)

    Simple.drop_collection()
def test_dict_field(self):
    """Ensure that dict types work as expected.
    """
    class BlogPost(Document):
        info = DictField()

    BlogPost.drop_collection()

    post = BlogPost()
    # Non-dict values are rejected.
    post.info = 'my post'
    self.assertRaises(ValidationError, post.validate)

    post.info = ['test', 'test']
    self.assertRaises(ValidationError, post.validate)

    # Keys starting with '$' are invalid, at any nesting level.
    post.info = {'$title': 'test'}
    self.assertRaises(ValidationError, post.validate)

    post.info = {'nested': {'$title': 'test'}}
    self.assertRaises(ValidationError, post.validate)

    # Keys containing '.' are invalid, at any nesting level.
    post.info = {'the.title': 'test'}
    self.assertRaises(ValidationError, post.validate)

    post.info = {'nested': {'the.title': 'test'}}
    self.assertRaises(ValidationError, post.validate)

    # Non-string keys are invalid.
    post.info = {1: 'test'}
    self.assertRaises(ValidationError, post.validate)

    post.info = {'title': 'test'}
    post.save()

    post = BlogPost()
    post.info = {'details': {'test': 'test'}}
    post.save()

    post = BlogPost()
    post.info = {'details': {'test': 3}}
    post.save()

    self.assertEqual(BlogPost.objects.count(), 3)
    # Querying by top-level and nested dict keys.
    self.assertEqual(
        BlogPost.objects.filter(info__title__exact='test').count(), 1)
    self.assertEqual(
        BlogPost.objects.filter(info__details__test__exact='test').count(), 1)

    # Confirm handles non strings or non existing keys
    self.assertEqual(
        BlogPost.objects.filter(info__details__test__exact=5).count(), 0)
    self.assertEqual(
        BlogPost.objects.filter(info__made_up__test__exact='test').count(), 0)

    # In-place dict mutations (update / setdefault) are tracked and saved.
    post = BlogPost.objects.create(info={'title': 'original'})
    post.info.update({'title': 'updated'})
    post.save()
    post.reload()
    self.assertEqual('updated', post.info['title'])

    post.info.setdefault('authors', [])
    post.save()
    post.reload()
    self.assertEqual([], post.info['authors'])

    BlogPost.drop_collection()
def test_dictfield_strict(self):
    """Ensure that dict field handles validation if provided a strict field type."""
    class Simple(Document):
        mapping = DictField(field=IntField())

    Simple.drop_collection()

    e = Simple()
    e.mapping['someint'] = 1
    e.save()

    # A non-int value must fail validation on save.
    def create_invalid_mapping():
        e.mapping['somestring'] = "abc"
        e.save()

    self.assertRaises(ValidationError, create_invalid_mapping)

    Simple.drop_collection()
def test_dictfield_complex(self):
    """Ensure that the dict field can handle the complex types."""
    class SettingBase(EmbeddedDocument):
        meta = {'allow_inheritance': True}

    class StringSetting(SettingBase):
        value = StringField()

    class IntegerSetting(SettingBase):
        value = IntField()

    class Simple(Document):
        mapping = DictField()

    Simple.drop_collection()

    # Mix embedded docs, plain values, and a nested dict containing
    # further embedded docs and lists.
    e = Simple()
    e.mapping['somestring'] = StringSetting(value='foo')
    e.mapping['someint'] = IntegerSetting(value=42)
    e.mapping['nested_dict'] = {'number': 1, 'string': 'Hi!',
                                'float': 1.001,
                                'complex': IntegerSetting(value=42),
                                'list': [IntegerSetting(value=42),
                                         StringSetting(value='foo')]}
    e.save()

    # Embedded docs come back as their concrete subclasses.
    e2 = Simple.objects.get(id=e.id)
    self.assertTrue(isinstance(e2.mapping['somestring'], StringSetting))
    self.assertTrue(isinstance(e2.mapping['someint'], IntegerSetting))

    # Test querying by key and nested paths
    self.assertEqual(
        Simple.objects.filter(mapping__someint__value=42).count(), 1)
    self.assertEqual(
        Simple.objects.filter(mapping__nested_dict__number=1).count(), 1)
    self.assertEqual(
        Simple.objects.filter(mapping__nested_dict__complex__value=42).count(), 1)
    self.assertEqual(
        Simple.objects.filter(mapping__nested_dict__list__0__value=42).count(), 1)
    self.assertEqual(
        Simple.objects.filter(mapping__nested_dict__list__1__value='foo').count(), 1)

    # Confirm can update the whole dict and deeply nested entries
    Simple.objects().update(
        set__mapping={"someint": IntegerSetting(value=10)})
    Simple.objects().update(
        set__mapping__nested_dict__list__1=StringSetting(value='Boo'))
    self.assertEqual(
        Simple.objects.filter(mapping__nested_dict__list__1__value='foo').count(), 0)
    self.assertEqual(
        Simple.objects.filter(mapping__nested_dict__list__1__value='Boo').count(), 1)

    Simple.drop_collection()
def test_atomic_update_dict_field(self):
    """Ensure that the entire DictField can be atomically updated."""
    class Simple(Document):
        mapping = DictField(field=ListField(IntField(required=True)))

    Simple.drop_collection()

    e = Simple()
    e.mapping['someints'] = [1, 2]
    e.save()
    # set__mapping replaces the whole dict in one update operation.
    e.update(set__mapping={"ints": [3, 4]})
    e.reload()
    # The reloaded value is wrapped in mongoengine's tracked BaseDict.
    self.assertEqual(BaseDict, type(e.mapping))
    self.assertEqual({"ints": [3, 4]}, e.mapping)

    # An atomic update that violates the declared element type fails.
    def create_invalid_mapping():
        e.update(set__mapping={"somestrings": ["foo", "bar",]})

    self.assertRaises(ValueError, create_invalid_mapping)

    Simple.drop_collection()
def test_mapfield(self):
    """Ensure that the MapField handles the declared type."""
    class Simple(Document):
        mapping = MapField(IntField())

    Simple.drop_collection()

    e = Simple()
    e.mapping['someint'] = 1
    e.save()

    # Values that do not match the declared type fail on save.
    def create_invalid_mapping():
        e.mapping['somestring'] = "abc"
        e.save()

    self.assertRaises(ValidationError, create_invalid_mapping)

    # A MapField must be declared with an explicit value type.
    def create_invalid_class():
        class NoDeclaredType(Document):
            mapping = MapField()

    self.assertRaises(ValidationError, create_invalid_class)

    Simple.drop_collection()
def test_complex_mapfield(self):
    """Ensure that the MapField can handle complex declared types."""
    class SettingBase(EmbeddedDocument):
        meta = {"allow_inheritance": True}

    class StringSetting(SettingBase):
        value = StringField()

    class IntegerSetting(SettingBase):
        value = IntField()

    class Extensible(Document):
        mapping = MapField(EmbeddedDocumentField(SettingBase))

    Extensible.drop_collection()

    e = Extensible()
    e.mapping['somestring'] = StringSetting(value='foo')
    e.mapping['someint'] = IntegerSetting(value=42)
    e.save()

    # Values come back as the concrete SettingBase subclasses.
    e2 = Extensible.objects.get(id=e.id)
    self.assertTrue(isinstance(e2.mapping['somestring'], StringSetting))
    self.assertTrue(isinstance(e2.mapping['someint'], IntegerSetting))

    # A plain int is not an embedded SettingBase and must fail on save.
    def create_invalid_mapping():
        e.mapping['someint'] = 123
        e.save()

    self.assertRaises(ValidationError, create_invalid_mapping)

    Extensible.drop_collection()
def test_embedded_mapfield_db_field(self):
    """db_field aliases are honoured both on the MapField itself ('x')
    and on fields inside the embedded value documents ('i')."""
    class Embedded(EmbeddedDocument):
        number = IntField(default=0, db_field='i')

    class Test(Document):
        my_map = MapField(field=EmbeddedDocumentField(Embedded),
                          db_field='x')

    Test.drop_collection()

    test = Test()
    test.my_map['DICTIONARY_KEY'] = Embedded(number=1)
    test.save()

    # Atomic increments resolve the python names to the db aliases.
    Test.objects.update_one(inc__my_map__DICTIONARY_KEY__number=1)

    test = Test.objects.get()
    self.assertEqual(test.my_map['DICTIONARY_KEY'].number, 2)
    # Raw document uses the aliased names, not the python ones.
    doc = self.db.test.find_one()
    self.assertEqual(doc['x']['DICTIONARY_KEY']['i'], 2)
def test_mapfield_numerical_index(self):
    """Ensure that MapField accept numeric strings as indexes."""
    class Embedded(EmbeddedDocument):
        name = StringField()

    class Test(Document):
        my_map = MapField(EmbeddedDocumentField(Embedded))

    Test.drop_collection()

    test = Test()
    # '1' is a numeric *string* key - it must not be confused with a
    # positional list index.
    test.my_map['1'] = Embedded(name='test')
    test.save()
    test.my_map['1'].name = 'test updated'
    test.save()

    Test.drop_collection()
def test_map_field_lookup(self):
    """Ensure MapField lookups succeed on Fields without a lookup method"""
    class Action(EmbeddedDocument):
        operation = StringField()
        object = StringField()

    class Log(Document):
        name = StringField()
        visited = MapField(DateTimeField())
        actions = MapField(EmbeddedDocumentField(Action))

    Log.drop_collection()
    Log(name="wilson", visited={'friends': datetime.datetime.now()},
        actions={'friends': Action(operation='drink', object='beer')}).save()

    # __exists on a DateTimeField map value, and nested attribute
    # lookups through an embedded-document map value.
    self.assertEqual(1, Log.objects(
        visited__friends__exists=True).count())
    self.assertEqual(1, Log.objects(
        actions__friends__operation='drink',
        actions__friends__object='beer').count())
def test_embedded_db_field(self):
    """db_field aliases work for an EmbeddedDocumentField ('x') and for
    fields of the embedded document itself ('i')."""
    class Embedded(EmbeddedDocument):
        number = IntField(default=0, db_field='i')

    class Test(Document):
        embedded = EmbeddedDocumentField(Embedded, db_field='x')

    Test.drop_collection()

    test = Test()
    test.embedded = Embedded(number=1)
    test.save()

    # Atomic increment resolves python names to the db aliases.
    Test.objects.update_one(inc__embedded__number=1)

    test = Test.objects.get()
    self.assertEqual(test.embedded.number, 2)
    # Raw document stores the aliased names.
    doc = self.db.test.find_one()
    self.assertEqual(doc['x']['i'], 2)
def test_embedded_document_validation(self):
    """Ensure that invalid embedded documents cannot be assigned to
    embedded document fields.
    """
    class Comment(EmbeddedDocument):
        content = StringField()

    class PersonPreferences(EmbeddedDocument):
        food = StringField(required=True)
        number = IntField()

    class Person(Document):
        name = StringField()
        preferences = EmbeddedDocumentField(PersonPreferences)

    person = Person(name='Test User')
    # A plain string is not an embedded document.
    person.preferences = 'My Preferences'
    self.assertRaises(ValidationError, person.validate)

    # Check that only the right embedded doc works
    person.preferences = Comment(content='Nice blog post...')
    self.assertRaises(ValidationError, person.validate)

    # Check that the embedded doc is valid
    # (missing required `food`, so validation still fails).
    person.preferences = PersonPreferences()
    self.assertRaises(ValidationError, person.validate)

    person.preferences = PersonPreferences(food='Cheese', number=47)
    self.assertEqual(person.preferences.food, 'Cheese')
    person.validate()
def test_embedded_document_inheritance(self):
    """Ensure that subclasses of embedded documents may be provided to
    EmbeddedDocumentFields of the superclass' type.
    """
    class User(EmbeddedDocument):
        name = StringField()
        meta = {'allow_inheritance': True}

    class PowerUser(User):
        power = IntField()

    class BlogPost(Document):
        content = StringField()
        author = EmbeddedDocumentField(User)

    post = BlogPost(content='What I did today...')
    # A PowerUser instance is accepted where a User is declared,
    # and subclass-only fields round-trip through the database.
    post.author = PowerUser(name='Test User', power=47)
    post.save()

    self.assertEqual(47, BlogPost.objects.first().author.power)
def test_reference_validation(self):
    """Ensure that invalid docment objects cannot be assigned to reference
    fields.
    """
    class User(Document):
        name = StringField()

    class BlogPost(Document):
        content = StringField()
        author = ReferenceField(User)

    User.drop_collection()
    BlogPost.drop_collection()

    # ReferenceField may only target Document subclasses.
    self.assertRaises(ValidationError, ReferenceField, EmbeddedDocument)

    user = User(name='Test User')

    # Ensure that the referenced object must have been saved
    post1 = BlogPost(content='Chips and gravy taste good.')
    post1.author = user
    self.assertRaises(ValidationError, post1.save)

    # Check that an invalid object type cannot be used
    post2 = BlogPost(content='Chips and chilli taste good.')
    post1.author = post2
    self.assertRaises(ValidationError, post1.validate)

    # A saved User is a valid reference target.
    user.save()
    post1.author = user
    post1.save()

    # Even a *saved* document of the wrong class is rejected.
    post2.save()
    post1.author = post2
    self.assertRaises(ValidationError, post1.validate)

    User.drop_collection()
    BlogPost.drop_collection()
def test_dbref_reference_fields(self):
    """With dbref=True the reference is stored as a DBRef in the raw
    document and still dereferences to the target on access."""
    class Person(Document):
        name = StringField()
        parent = ReferenceField('self', dbref=True)

    Person.drop_collection()

    p1 = Person(name="John").save()
    Person(name="Ross", parent=p1).save()

    # Raw storage is a DBRef pointing at the person collection.
    col = Person._get_collection()
    data = col.find_one({'name': 'Ross'})
    self.assertEqual(data['parent'], DBRef('person', p1.pk))

    p = Person.objects.get(name="Ross")
    self.assertEqual(p.parent, p1)
def test_dbref_to_mongo(self):
    """With dbref=False a reference given as a string id is serialised
    to a bare ObjectId by to_mongo()."""
    class Person(Document):
        name = StringField()
        parent = ReferenceField('self', dbref=False)

    p1 = Person._from_son({'name': "Yakxxx",
                           'parent': "50a234ea469ac1eda42d347d"})
    mongoed = p1.to_mongo()
    self.assertTrue(isinstance(mongoed['parent'], ObjectId))
def test_cached_reference_fields(self):
    """CachedReferenceField stores the referenced doc's pk plus the
    listed fields inline, and supports queries on the cached fields."""
    class Animal(Document):
        name = StringField()
        tag = StringField()

    class Ocorrence(Document):
        person = StringField()
        animal = CachedReferenceField(
            Animal, fields=['tag'])

    Animal.drop_collection()
    Ocorrence.drop_collection()

    a = Animal(name="Leopard", tag="heavy")
    a.save()

    # The referenced class tracks which CachedReferenceFields point at it.
    self.assertEqual(Animal._cached_reference_fields, [Ocorrence.animal])
    o = Ocorrence(person="teste", animal=a)
    o.save()

    p = Ocorrence(person="Wilson")
    p.save()

    self.assertEqual(Ocorrence.objects(animal=None).count(), 1)

    # to_mongo with a field subset yields pk + the selected fields only.
    self.assertEqual(
        a.to_mongo(fields=['tag']), {'tag': 'heavy', "_id": a.pk})

    self.assertEqual(o.to_mongo()['animal']['tag'], 'heavy')

    # counts
    Ocorrence(person="teste 2").save()
    Ocorrence(person="teste 3").save()
    count = Ocorrence.objects(animal__tag='heavy').count()
    self.assertEqual(count, 1)

    ocorrence = Ocorrence.objects(animal__tag='heavy').first()
    self.assertEqual(ocorrence.person, "teste")
    self.assertTrue(isinstance(ocorrence.animal, Animal))
def test_cached_reference_field_decimal(self):
    """A DecimalField value cached through CachedReferenceField is
    stored (and queryable) as a float in the raw document."""
    class PersonAuto(Document):
        name = StringField()
        salary = DecimalField()

    class SocialTest(Document):
        group = StringField()
        person = CachedReferenceField(
            PersonAuto,
            fields=('salary',))

    PersonAuto.drop_collection()
    SocialTest.drop_collection()

    p = PersonAuto(name="Alberto", salary=Decimal('7000.00'))
    p.save()

    s = SocialTest(group="dev", person=p)
    s.save()

    # Raw document embeds the pk and the cached salary as a float.
    self.assertEqual(
        SocialTest.objects._collection.find_one({'person.salary': 7000.00}), {
            '_id': s.pk,
            'group': s.group,
            'person': {
                '_id': p.pk,
                'salary': 7000.00
            }
        })
def test_cached_reference_field_reference(self):
    """A cached field that is itself a ReferenceField is stored as the
    referenced document's pk and remains queryable."""
    class Group(Document):
        name = StringField()

    class Person(Document):
        name = StringField()
        group = ReferenceField(Group)

    class SocialData(Document):
        obs = StringField()
        tags = ListField(
            StringField())
        person = CachedReferenceField(
            Person,
            fields=('group',))

    Group.drop_collection()
    Person.drop_collection()
    SocialData.drop_collection()

    g1 = Group(name='dev')
    g1.save()

    g2 = Group(name="designers")
    g2.save()

    p1 = Person(name="Alberto", group=g1)
    p1.save()

    p2 = Person(name="Andre", group=g1)
    p2.save()

    p3 = Person(name="Afro design", group=g2)
    p3.save()

    s1 = SocialData(obs="testing 123", person=p1, tags=['tag1', 'tag2'])
    s1.save()

    s2 = SocialData(obs="testing 321", person=p3, tags=['tag3', 'tag4'])
    s2.save()

    # Raw document: the cached `group` is stored as the Group's pk.
    self.assertEqual(SocialData.objects._collection.find_one(
        {'tags': 'tag2'}), {
            '_id': s1.pk,
            'obs': 'testing 123',
            'tags': ['tag1', 'tag2'],
            'person': {
                '_id': p1.pk,
                'group': g1.pk
            }
        })

    # Querying through the cached reference works with a Group instance.
    self.assertEqual(SocialData.objects(person__group=g2).count(), 1)
    self.assertEqual(SocialData.objects(person__group=g2).first(), s2)
def test_cached_reference_field_update_all(self):
    """sync_all() refreshes every cached copy after a bulk update
    that bypassed the per-document auto-sync."""
    class Person(Document):
        TYPES = (
            ('pf', "PF"),
            ('pj', "PJ")
        )
        name = StringField()
        tp = StringField(
            choices=TYPES
        )

        father = CachedReferenceField('self', fields=('tp',))

    Person.drop_collection()

    a1 = Person(name="Wilson Father", tp="pj")
    a1.save()

    a2 = Person(name='Wilson Junior', tp='pf', father=a1)
    a2.save()

    # Cached copy reflects the father's tp at save time ('pj').
    self.assertEqual(dict(a2.to_mongo()), {
        "_id": a2.pk,
        "name": u"Wilson Junior",
        "tp": u"pf",
        "father": {
            "_id": a1.pk,
            "tp": u"pj"
        }
    })

    # Filtering on the cached reference compares only the embedded _id.
    self.assertEqual(Person.objects(father=a1)._query, {
        'father._id': a1.pk
    })
    self.assertEqual(Person.objects(father=a1).count(), 1)

    # Bulk queryset updates do not touch cached copies; sync_all() does.
    Person.objects.update(set__tp="pf")
    Person.father.sync_all()

    a2.reload()
    self.assertEqual(dict(a2.to_mongo()), {
        "_id": a2.pk,
        "name": u"Wilson Junior",
        "tp": u"pf",
        "father": {
            "_id": a1.pk,
            "tp": u"pf"
        }
    })
def test_cached_reference_fields_on_embedded_documents(self):
    """Declaring a CachedReferenceField on an EmbeddedDocument class
    raises InvalidDocumentError at class-creation time."""
    def build():
        class Test(Document):
            name = StringField()

        # Dynamic class creation triggers the metaclass check.
        type('WrongEmbeddedDocument', (
            EmbeddedDocument,), {
            'test': CachedReferenceField(Test)
        })

    self.assertRaises(InvalidDocumentError, build)
def test_cached_reference_auto_sync(self):
    """Saving the referenced document automatically refreshes the
    cached copies held by referring documents."""
    class Person(Document):
        TYPES = (
            ('pf', "PF"),
            ('pj', "PJ")
        )
        name = StringField()
        tp = StringField(
            choices=TYPES
        )

        father = CachedReferenceField('self', fields=('tp',))

    Person.drop_collection()

    a1 = Person(name="Wilson Father", tp="pj")
    a1.save()

    a2 = Person(name='Wilson Junior', tp='pf', father=a1)
    a2.save()

    # Changing and saving the father propagates to a2's cached copy.
    a1.tp = 'pf'
    a1.save()

    a2.reload()
    self.assertEqual(dict(a2.to_mongo()), {
        '_id': a2.pk,
        'name': 'Wilson Junior',
        'tp': 'pf',
        'father': {
            '_id': a1.pk,
            'tp': 'pf'
        }
    })
def test_cached_reference_auto_sync_disabled(self):
    """With auto_sync=False, saving the referenced document leaves
    stale cached copies in referring documents."""
    class Persone(Document):
        TYPES = (
            ('pf', "PF"),
            ('pj', "PJ")
        )
        name = StringField()
        tp = StringField(
            choices=TYPES
        )

        father = CachedReferenceField(
            'self', fields=('tp',), auto_sync=False)

    Persone.drop_collection()

    a1 = Persone(name="Wilson Father", tp="pj")
    a1.save()

    a2 = Persone(name='Wilson Junior', tp='pf', father=a1)
    a2.save()

    # The father changes, but a2's cached copy keeps the old 'pj'.
    a1.tp = 'pf'
    a1.save()

    self.assertEqual(Persone.objects._collection.find_one({'_id': a2.pk}), {
        '_id': a2.pk,
        'name': 'Wilson Junior',
        'tp': 'pf',
        'father': {
            '_id': a1.pk,
            'tp': 'pj'
        }
    })
def test_cached_reference_embedded_fields(self):
    """Dotted field paths ('owner.tp') can be cached, and the stored
    copy uses the embedded field's db_field alias ('t')."""
    class Owner(EmbeddedDocument):
        TPS = (
            ('n', "Normal"),
            ('u', "Urgent")
        )
        name = StringField()
        tp = StringField(
            verbose_name="Type",
            db_field="t",
            choices=TPS)

    class Animal(Document):
        name = StringField()
        tag = StringField()

        owner = EmbeddedDocumentField(Owner)

    class Ocorrence(Document):
        person = StringField()
        animal = CachedReferenceField(
            Animal, fields=['tag', 'owner.tp'])

    Animal.drop_collection()
    Ocorrence.drop_collection()

    a = Animal(name="Leopard", tag="heavy",
               owner=Owner(tp='u', name="Wilson Júnior")
               )
    a.save()

    o = Ocorrence(person="teste", animal=a)
    o.save()

    # Only the selected sub-field survives, under its db alias 't'.
    self.assertEqual(dict(a.to_mongo(fields=['tag', 'owner.tp'])), {
        '_id': a.pk,
        'tag': 'heavy',
        'owner': {
            't': 'u'
        }
    })
    self.assertEqual(o.to_mongo()['animal']['tag'], 'heavy')
    self.assertEqual(o.to_mongo()['animal']['owner']['t'], 'u')

    # counts
    Ocorrence(person="teste 2").save()
    Ocorrence(person="teste 3").save()
    count = Ocorrence.objects(
        animal__tag='heavy', animal__owner__tp='u').count()
    self.assertEqual(count, 1)

    ocorrence = Ocorrence.objects(
        animal__tag='heavy',
        animal__owner__tp='u').first()
    self.assertEqual(ocorrence.person, "teste")
    self.assertTrue(isinstance(ocorrence.animal, Animal))
def test_cached_reference_embedded_list_fields(self):
    """A list field inside an embedded document ('owner.tags') can be
    cached and queried through the cached reference."""
    class Owner(EmbeddedDocument):
        name = StringField()
        tags = ListField(StringField())

    class Animal(Document):
        name = StringField()
        tag = StringField()

        owner = EmbeddedDocumentField(Owner)

    class Ocorrence(Document):
        person = StringField()
        animal = CachedReferenceField(
            Animal, fields=['tag', 'owner.tags'])

    Animal.drop_collection()
    Ocorrence.drop_collection()

    a = Animal(name="Leopard", tag="heavy",
               owner=Owner(tags=['cool', 'funny'],
                           name="Wilson Júnior")
               )
    a.save()

    o = Ocorrence(person="teste 2", animal=a)
    o.save()

    # Only the selected sub-fields are embedded in the cached copy.
    self.assertEqual(dict(a.to_mongo(fields=['tag', 'owner.tags'])), {
        '_id': a.pk,
        'tag': 'heavy',
        'owner': {
            'tags': ['cool', 'funny']
        }
    })

    self.assertEqual(o.to_mongo()['animal']['tag'], 'heavy')
    self.assertEqual(o.to_mongo()['animal']['owner']['tags'],
                     ['cool', 'funny'])

    # counts - the lookup translates to dotted paths on the raw document.
    Ocorrence(person="teste 2").save()
    Ocorrence(person="teste 3").save()
    query = Ocorrence.objects(
        animal__tag='heavy', animal__owner__tags='cool')._query
    self.assertEqual(
        query, {'animal.owner.tags': 'cool', 'animal.tag': 'heavy'})

    ocorrence = Ocorrence.objects(
        animal__tag='heavy',
        animal__owner__tags='cool').first()
    self.assertEqual(ocorrence.person, "teste 2")
    self.assertTrue(isinstance(ocorrence.animal, Animal))
def test_objectid_reference_fields(self):
    """With dbref=False the reference is stored as a bare ObjectId in
    the raw document and still dereferences to the target on access."""
    class Person(Document):
        name = StringField()
        parent = ReferenceField('self', dbref=False)

    Person.drop_collection()

    p1 = Person(name="John").save()
    Person(name="Ross", parent=p1).save()

    # Raw storage is the parent's pk, not a DBRef.
    col = Person._get_collection()
    data = col.find_one({'name': 'Ross'})
    self.assertEqual(data['parent'], p1.pk)

    p = Person.objects.get(name="Ross")
    self.assertEqual(p.parent, p1)
def test_list_item_dereference(self):
    """Ensure that DBRef items in ListFields are dereferenced.
    """
    class User(Document):
        name = StringField()

    class Group(Document):
        members = ListField(ReferenceField(User))

    User.drop_collection()
    Group.drop_collection()

    user1 = User(name='user1')
    user1.save()
    user2 = User(name='user2')
    user2.save()

    group = Group(members=[user1, user2])
    group.save()

    # Accessing list items on a freshly fetched doc yields the full
    # User documents, not raw references.
    group_obj = Group.objects.first()
    self.assertEqual(group_obj.members[0].name, user1.name)
    self.assertEqual(group_obj.members[1].name, user2.name)

    User.drop_collection()
    Group.drop_collection()
def test_recursive_reference(self):
    """Ensure that ReferenceFields can reference their own documents.
    """
    class Employee(Document):
        name = StringField()
        # 'self' makes both fields refer back to Employee.
        boss = ReferenceField('self')
        friends = ListField(ReferenceField('self'))

    Employee.drop_collection()

    bill = Employee(name='Bill Lumbergh')
    bill.save()

    michael = Employee(name='Michael Bolton')
    michael.save()

    samir = Employee(name='Samir Nagheenanajar')
    samir.save()

    friends = [michael, samir]
    peter = Employee(name='Peter Gibbons', boss=bill, friends=friends)
    peter.save()

    # Both the single and the list self-references round-trip.
    peter = Employee.objects.with_id(peter.id)
    self.assertEqual(peter.boss, bill)
    self.assertEqual(peter.friends, friends)
def test_recursive_embedding(self):
    """Ensure that EmbeddedDocumentFields can contain their own documents.
    """
    class TreeNode(EmbeddedDocument):
        name = StringField()
        # Self-referencing embedded field via the 'self' alias.
        children = ListField(EmbeddedDocumentField('self'))

    class Tree(Document):
        name = StringField()
        children = ListField(EmbeddedDocumentField('TreeNode'))

    Tree.drop_collection()

    tree = Tree(name="Tree")
    first_child = TreeNode(name="Child 1")
    tree.children.append(first_child)

    second_child = TreeNode(name="Child 2")
    first_child.children.append(second_child)
    tree.save()

    tree = Tree.objects.first()
    self.assertEqual(len(tree.children), 1)

    self.assertEqual(len(tree.children[0].children), 1)

    # Appending to a nested list on a reloaded document persists on save.
    third_child = TreeNode(name="Child 3")
    tree.children[0].children.append(third_child)
    tree.save()

    self.assertEqual(len(tree.children), 1)
    self.assertEqual(tree.children[0].name, first_child.name)
    self.assertEqual(tree.children[0].children[0].name, second_child.name)
    self.assertEqual(tree.children[0].children[1].name, third_child.name)

    # Test updating
    tree.children[0].name = 'I am Child 1'
    tree.children[0].children[0].name = 'I am Child 2'
    tree.children[0].children[1].name = 'I am Child 3'
    tree.save()

    self.assertEqual(tree.children[0].name, 'I am Child 1')
    self.assertEqual(tree.children[0].children[0].name, 'I am Child 2')
    self.assertEqual(tree.children[0].children[1].name, 'I am Child 3')

    # Test removal
    self.assertEqual(len(tree.children[0].children), 2)
    del(tree.children[0].children[1])
    tree.save()
    self.assertEqual(len(tree.children[0].children), 1)

    tree.children[0].children.pop(0)
    tree.save()
    self.assertEqual(len(tree.children[0].children), 0)
    self.assertEqual(tree.children[0].children, [])

    tree.children[0].children.insert(0, third_child)
    tree.children[0].children.insert(0, second_child)
    tree.save()

    # Insertion order is preserved after the save round-trip.
    self.assertEqual(len(tree.children[0].children), 2)
    self.assertEqual(tree.children[0].children[0].name, second_child.name)
    self.assertEqual(tree.children[0].children[1].name, third_child.name)
def test_undefined_reference(self):
    """Ensure that ReferenceFields may reference undefined Documents.
    """
    class Product(Document):
        name = StringField()
        # 'Company' is referenced by name before the class exists.
        company = ReferenceField('Company')

    class Company(Document):
        name = StringField()

    Product.drop_collection()
    Company.drop_collection()

    ten_gen = Company(name='10gen')
    ten_gen.save()
    mongodb = Product(name='MongoDB', company=ten_gen)
    mongodb.save()

    me = Product(name='MongoEngine')
    me.save()

    obj = Product.objects(company=ten_gen).first()
    self.assertEqual(obj, mongodb)
    self.assertEqual(obj.company, ten_gen)

    # company=None matches the document saved without a reference.
    obj = Product.objects(company=None).first()
    self.assertEqual(obj, me)

    obj, created = Product.objects.get_or_create(company=None)

    self.assertEqual(created, False)
    self.assertEqual(obj, me)
def test_reference_query_conversion(self):
    """Ensure that ReferenceFields can be queried using objects and values
    of the type of the primary key of the referenced object.
    """
    class Member(Document):
        # Custom (integer) primary key instead of an ObjectId.
        user_num = IntField(primary_key=True)

    class BlogPost(Document):
        title = StringField()
        author = ReferenceField(Member, dbref=False)

    Member.drop_collection()
    BlogPost.drop_collection()

    m1 = Member(user_num=1)
    m1.save()
    m2 = Member(user_num=2)
    m2.save()

    post1 = BlogPost(title='post 1', author=m1)
    post1.save()

    post2 = BlogPost(title='post 2', author=m2)
    post2.save()

    # Querying by the Member document converts it to its pk value.
    post = BlogPost.objects(author=m1).first()
    self.assertEqual(post.id, post1.id)

    post = BlogPost.objects(author=m2).first()
    self.assertEqual(post.id, post2.id)

    Member.drop_collection()
    BlogPost.drop_collection()

def test_reference_query_conversion_dbref(self):
    """Ensure that ReferenceFields can be queried using objects and values
    of the type of the primary key of the referenced object.
    """
    class Member(Document):
        user_num = IntField(primary_key=True)

    class BlogPost(Document):
        title = StringField()
        # Same test as above, but stored as a DBRef instead of a raw pk.
        author = ReferenceField(Member, dbref=True)

    Member.drop_collection()
    BlogPost.drop_collection()

    m1 = Member(user_num=1)
    m1.save()
    m2 = Member(user_num=2)
    m2.save()

    post1 = BlogPost(title='post 1', author=m1)
    post1.save()

    post2 = BlogPost(title='post 2', author=m2)
    post2.save()

    post = BlogPost.objects(author=m1).first()
    self.assertEqual(post.id, post1.id)

    post = BlogPost.objects(author=m2).first()
    self.assertEqual(post.id, post2.id)

    Member.drop_collection()
    BlogPost.drop_collection()
def test_generic_reference(self):
    """Ensure that a GenericReferenceField properly dereferences items.
    """
    class Link(Document):
        title = StringField()
        meta = {'allow_inheritance': False}

    class Post(Document):
        title = StringField()

    class Bookmark(Document):
        # Can point at any registered Document class.
        bookmark_object = GenericReferenceField()

    Link.drop_collection()
    Post.drop_collection()
    Bookmark.drop_collection()

    link_1 = Link(title="Pitchfork")
    link_1.save()

    post_1 = Post(title="Behind the Scenes of the Pavement Reunion")
    post_1.save()

    bm = Bookmark(bookmark_object=post_1)
    bm.save()

    bm = Bookmark.objects(bookmark_object=post_1).first()

    self.assertEqual(bm.bookmark_object, post_1)
    self.assertTrue(isinstance(bm.bookmark_object, Post))

    # Re-pointing the generic reference at a different document type.
    bm.bookmark_object = link_1
    bm.save()

    bm = Bookmark.objects(bookmark_object=link_1).first()

    self.assertEqual(bm.bookmark_object, link_1)
    self.assertTrue(isinstance(bm.bookmark_object, Link))

    Link.drop_collection()
    Post.drop_collection()
    Bookmark.drop_collection()

def test_generic_reference_list(self):
    """Ensure that a ListField properly dereferences generic references.
    """
    class Link(Document):
        title = StringField()

    class Post(Document):
        title = StringField()

    class User(Document):
        bookmarks = ListField(GenericReferenceField())

    Link.drop_collection()
    Post.drop_collection()
    User.drop_collection()

    link_1 = Link(title="Pitchfork")
    link_1.save()

    post_1 = Post(title="Behind the Scenes of the Pavement Reunion")
    post_1.save()

    # A single list may mix references to different document types.
    user = User(bookmarks=[post_1, link_1])
    user.save()

    user = User.objects(bookmarks__all=[post_1, link_1]).first()

    self.assertEqual(user.bookmarks[0], post_1)
    self.assertEqual(user.bookmarks[1], link_1)

    Link.drop_collection()
    Post.drop_collection()
    User.drop_collection()
def test_generic_reference_document_not_registered(self):
    """Ensure dereferencing out of the document registry throws a
    `NotRegistered` error.
    """
    class Link(Document):
        title = StringField()

    class User(Document):
        bookmarks = ListField(GenericReferenceField())

    Link.drop_collection()
    User.drop_collection()

    link_1 = Link(title="Pitchfork")
    link_1.save()

    user = User(bookmarks=[link_1])
    user.save()

    # Mimic User and Link definitions being in a different file
    # and the Link model not being imported in the User file.
    del(_document_registry["Link"])

    user = User.objects.first()
    try:
        # Accessing the attribute triggers dereferencing, which must fail.
        user.bookmarks
        raise AssertionError("Link was removed from the registry")
    except NotRegistered:
        pass

    Link.drop_collection()
    User.drop_collection()
def test_generic_reference_is_none(self):
    """Querying a GenericReferenceField for None matches unset refs."""
    class Person(Document):
        name = StringField()
        city = GenericReferenceField()

    Person.drop_collection()
    Person(name="Wilson Jr").save()

    # The saved document has no city, so it must match city=None.
    matches = Person.objects(city=None)
    self.assertEqual("[<Person: Person object>]", repr(matches))
def test_generic_reference_choices(self):
    """Ensure that a GenericReferenceField can handle choices
    """
    class Link(Document):
        title = StringField()

    class Post(Document):
        title = StringField()

    class Bookmark(Document):
        # Only Post documents are valid targets.
        bookmark_object = GenericReferenceField(choices=(Post,))

    Link.drop_collection()
    Post.drop_collection()
    Bookmark.drop_collection()

    link_1 = Link(title="Pitchfork")
    link_1.save()

    post_1 = Post(title="Behind the Scenes of the Pavement Reunion")
    post_1.save()

    # A Link is not in the choices tuple, so validation must fail.
    bm = Bookmark(bookmark_object=link_1)
    self.assertRaises(ValidationError, bm.validate)

    bm = Bookmark(bookmark_object=post_1)
    bm.save()

    bm = Bookmark.objects.first()
    self.assertEqual(bm.bookmark_object, post_1)

def test_generic_reference_list_choices(self):
    """Ensure that a ListField properly dereferences generic references and
    respects choices.
    """
    class Link(Document):
        title = StringField()

    class Post(Document):
        title = StringField()

    class User(Document):
        bookmarks = ListField(GenericReferenceField(choices=(Post,)))

    Link.drop_collection()
    Post.drop_collection()
    User.drop_collection()

    link_1 = Link(title="Pitchfork")
    link_1.save()

    post_1 = Post(title="Behind the Scenes of the Pavement Reunion")
    post_1.save()

    # List items are validated against the choices as well.
    user = User(bookmarks=[link_1])
    self.assertRaises(ValidationError, user.validate)

    user = User(bookmarks=[post_1])
    user.save()

    user = User.objects.first()
    self.assertEqual(user.bookmarks, [post_1])

    Link.drop_collection()
    Post.drop_collection()
    User.drop_collection()
def test_generic_reference_list_item_modification(self):
    """Ensure that modifications of related documents (through generic reference) don't influence on querying
    """
    class Post(Document):
        title = StringField()

    class User(Document):
        username = StringField()
        bookmarks = ListField(GenericReferenceField())

    Post.drop_collection()
    User.drop_collection()

    post_1 = Post(title="Behind the Scenes of the Pavement Reunion")
    post_1.save()

    user = User(bookmarks=[post_1])
    user.save()

    # Mutate both the referenced document and the owner before re-querying.
    post_1.title = "Title was modified"
    user.username = "New username"
    user.save()

    # The reference is stored by id, so the query must still match.
    user = User.objects(bookmarks__all=[post_1]).first()

    self.assertNotEqual(user, None)
    self.assertEqual(user.bookmarks[0], post_1)

    Post.drop_collection()
    User.drop_collection()
def test_binary_fields(self):
    """Ensure that binary fields can be stored and retrieved.
    """
    class Attachment(Document):
        content_type = StringField()
        blob = BinaryField()

    BLOB = b('\xe6\x00\xc4\xff\x07')
    MIME_TYPE = 'application/octet-stream'

    Attachment.drop_collection()

    attachment = Attachment(content_type=MIME_TYPE, blob=BLOB)
    attachment.save()

    attachment_1 = Attachment.objects().first()
    self.assertEqual(MIME_TYPE, attachment_1.content_type)
    # bin_type normalizes the driver's return type for comparison.
    self.assertEqual(BLOB, bin_type(attachment_1.blob))

    Attachment.drop_collection()

def test_binary_validation(self):
    """Ensure that invalid values cannot be assigned to binary fields.
    """
    class Attachment(Document):
        blob = BinaryField()

    class AttachmentRequired(Document):
        blob = BinaryField(required=True)

    class AttachmentSizeLimit(Document):
        blob = BinaryField(max_bytes=4)

    Attachment.drop_collection()
    AttachmentRequired.drop_collection()
    AttachmentSizeLimit.drop_collection()

    attachment = Attachment()
    attachment.validate()
    # Non-binary value must be rejected.
    attachment.blob = 2
    self.assertRaises(ValidationError, attachment.validate)

    attachment_required = AttachmentRequired()
    self.assertRaises(ValidationError, attachment_required.validate)
    attachment_required.blob = Binary(b('\xe6\x00\xc4\xff\x07'))
    attachment_required.validate()

    # 5 bytes exceeds max_bytes=4; 4 bytes is accepted.
    attachment_size_limit = AttachmentSizeLimit(
        blob=b('\xe6\x00\xc4\xff\x07'))
    self.assertRaises(ValidationError, attachment_size_limit.validate)
    attachment_size_limit.blob = b('\xe6\x00\xc4\xff')
    attachment_size_limit.validate()

    Attachment.drop_collection()
    AttachmentRequired.drop_collection()
    AttachmentSizeLimit.drop_collection()
def test_binary_field_primary(self):
    """A BinaryField can serve as the primary key."""
    class Attachment(Document):
        id = BinaryField(primary_key=True)

    Attachment.drop_collection()
    binary_id = uuid.uuid4().bytes
    att = Attachment(id=binary_id).save()
    self.assertEqual(1, Attachment.objects.count())
    self.assertEqual(1, Attachment.objects.filter(id=att.id).count())
    # TODO use assertIsNotNone once Python 2.6 support is dropped
    self.assertTrue(Attachment.objects.filter(id=att.id).first() is not None)
    att.delete()
    self.assertEqual(0, Attachment.objects.count())

def test_binary_field_primary_filter_by_binary_pk_as_str(self):
    # Documents the currently-unsupported case; skipped unconditionally.
    raise SkipTest("Querying by id as string is not currently supported")

    class Attachment(Document):
        id = BinaryField(primary_key=True)

    Attachment.drop_collection()
    binary_id = uuid.uuid4().bytes
    att = Attachment(id=binary_id).save()
    self.assertEqual(1, Attachment.objects.filter(id=binary_id).count())
    # TODO use assertIsNotNone once Python 2.6 support is dropped
    self.assertTrue(Attachment.objects.filter(id=binary_id).first() is not None)
    att.delete()
    self.assertEqual(0, Attachment.objects.count())
def test_choices_validation(self):
    """Ensure that value is in a container of allowed values."""
    SIZES = (('S', 'Small'), ('M', 'Medium'), ('L', 'Large'),
             ('XL', 'Extra Large'), ('XXL', 'Extra Extra Large'))

    class Shirt(Document):
        size = StringField(max_length=3, choices=SIZES)

    Shirt.drop_collection()

    shirt = Shirt()
    shirt.validate()  # unset field is valid

    shirt.size = "S"
    shirt.validate()  # allowed choice is valid

    # "XS" is not among the declared choices.
    shirt.size = "XS"
    self.assertRaises(ValidationError, shirt.validate)

    Shirt.drop_collection()
def test_choices_validation_documents(self):
    """
    Ensure fields with document choices validate given a valid choice.
    """
    class UserComments(EmbeddedDocument):
        author = StringField()
        message = StringField()

    class BlogPost(Document):
        comments = ListField(
            GenericEmbeddedDocumentField(choices=(UserComments,))
        )

    # Ensure Validation Passes
    BlogPost(comments=[
        UserComments(author='user2', message='message2'),
    ]).save()

def test_choices_validation_documents_invalid(self):
    """
    Ensure fields with document choices validate given an invalid choice.
    This should throw a ValidationError exception.
    """
    class UserComments(EmbeddedDocument):
        author = StringField()
        message = StringField()

    class ModeratorComments(EmbeddedDocument):
        author = StringField()
        message = StringField()

    class BlogPost(Document):
        comments = ListField(
            GenericEmbeddedDocumentField(choices=(UserComments,))
        )

    # Single Entry Failure
    post = BlogPost(comments=[
        ModeratorComments(author='mod1', message='message1'),
    ])
    self.assertRaises(ValidationError, post.save)

    # Mixed Entry Failure
    post = BlogPost(comments=[
        ModeratorComments(author='mod1', message='message1'),
        UserComments(author='user2', message='message2'),
    ])
    self.assertRaises(ValidationError, post.save)

def test_choices_validation_documents_inheritance(self):
    """
    Ensure fields with document choices validate given subclass of choice.
    """
    class Comments(EmbeddedDocument):
        meta = {
            'abstract': True
        }
        author = StringField()
        message = StringField()

    class UserComments(Comments):
        pass

    class BlogPost(Document):
        comments = ListField(
            GenericEmbeddedDocumentField(choices=(Comments,))
        )

    # Save Valid EmbeddedDocument Type
    BlogPost(comments=[
        UserComments(author='user2', message='message2'),
    ]).save()

def test_choices_get_field_display(self):
    """Test dynamic helper for returning the display value of a choices
    field.
    """
    class Shirt(Document):
        size = StringField(max_length=3, choices=(
            ('S', 'Small'), ('M', 'Medium'), ('L', 'Large'),
            ('XL', 'Extra Large'), ('XXL', 'Extra Extra Large')))
        style = StringField(max_length=3, choices=(
            ('S', 'Small'), ('B', 'Baggy'), ('W', 'wide')), default='S')

    Shirt.drop_collection()

    shirt = Shirt()

    # No value set -> no display; defaults resolve to their display value.
    self.assertEqual(shirt.get_size_display(), None)
    self.assertEqual(shirt.get_style_display(), 'Small')

    shirt.size = "XXL"
    shirt.style = "B"
    self.assertEqual(shirt.get_size_display(), 'Extra Extra Large')
    self.assertEqual(shirt.get_style_display(), 'Baggy')

    # Set as Z - an invalid choice
    shirt.size = "Z"
    shirt.style = "Z"
    # Unknown values fall back to the raw value for display...
    self.assertEqual(shirt.get_size_display(), 'Z')
    self.assertEqual(shirt.get_style_display(), 'Z')
    # ...but still fail validation.
    self.assertRaises(ValidationError, shirt.validate)

    Shirt.drop_collection()
def test_simple_choices_validation(self):
    """Ensure that value is in a container of allowed values.
    """
    class Shirt(Document):
        # Flat choices tuple: values double as display names.
        size = StringField(max_length=3,
                           choices=('S', 'M', 'L', 'XL', 'XXL'))

    Shirt.drop_collection()

    shirt = Shirt()
    shirt.validate()

    shirt.size = "S"
    shirt.validate()

    shirt.size = "XS"
    self.assertRaises(ValidationError, shirt.validate)

    Shirt.drop_collection()

def test_simple_choices_get_field_display(self):
    """Test dynamic helper for returning the display value of a choices
    field.
    """
    class Shirt(Document):
        size = StringField(max_length=3,
                           choices=('S', 'M', 'L', 'XL', 'XXL'))
        style = StringField(max_length=3,
                            choices=('Small', 'Baggy', 'wide'),
                            default='Small')

    Shirt.drop_collection()

    shirt = Shirt()

    self.assertEqual(shirt.get_size_display(), None)
    self.assertEqual(shirt.get_style_display(), 'Small')

    # With flat choices the display value is the stored value itself.
    shirt.size = "XXL"
    shirt.style = "Baggy"
    self.assertEqual(shirt.get_size_display(), 'XXL')
    self.assertEqual(shirt.get_style_display(), 'Baggy')

    # Set as Z - an invalid choice
    shirt.size = "Z"
    shirt.style = "Z"
    self.assertEqual(shirt.get_size_display(), 'Z')
    self.assertEqual(shirt.get_style_display(), 'Z')
    self.assertRaises(ValidationError, shirt.validate)

    Shirt.drop_collection()
def test_simple_choices_validation_invalid_value(self):
    """Ensure that error messages are correct.

    Flat choices and paired (value, display) choices produce different
    "must be one of" messages; both are checked here.
    """
    SIZES = ('S', 'M', 'L', 'XL', 'XXL')
    COLORS = (('R', 'Red'), ('B', 'Blue'))
    SIZE_MESSAGE = u"Value must be one of ('S', 'M', 'L', 'XL', 'XXL')"
    COLOR_MESSAGE = u"Value must be one of ['R', 'B']"

    class Shirt(Document):
        size = StringField(max_length=3, choices=SIZES)
        color = StringField(max_length=1, choices=COLORS)

    Shirt.drop_collection()

    shirt = Shirt()
    shirt.validate()

    shirt.size = "S"
    shirt.color = "R"
    shirt.validate()

    shirt.size = "XS"
    shirt.color = "G"
    try:
        shirt.validate()
    # Python-3-compatible syntax (valid since 2.6); the old
    # `except ValidationError, error` form is a SyntaxError on Python 3.
    except ValidationError as error:
        # get the validation rules
        error_dict = error.to_dict()
        self.assertEqual(error_dict['size'], SIZE_MESSAGE)
        self.assertEqual(error_dict['color'], COLOR_MESSAGE)
    else:
        # Previously the test silently passed if validate() never raised.
        self.fail("ValidationError was not raised")

    Shirt.drop_collection()
def test_ensure_unique_default_instances(self):
    """Ensure that every field has it's own unique default instance."""
    class D(Document):
        data = DictField()
        data2 = DictField(default=lambda: {})

    # Mutate the defaults on one instance...
    first = D()
    first.data['foo'] = 'bar'
    first.data2['foo'] = 'bar'

    # ...a second instance must still start from fresh, empty defaults.
    second = D()
    self.assertEqual({}, second.data)
    self.assertEqual({}, second.data2)
def test_sequence_field(self):
    """A SequenceField allocates increasing ids via a counters collection."""
    class Person(Document):
        id = SequenceField(primary_key=True)
        name = StringField()

    self.db['mongoengine.counters'].drop()
    Person.drop_collection()

    for x in xrange(10):
        Person(name="Person %s" % x).save()

    c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'})
    self.assertEqual(c['next'], 10)

    # Allocated ids are 1..10.
    ids = [i.id for i in Person.objects]
    self.assertEqual(ids, range(1, 11))

    c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'})
    self.assertEqual(c['next'], 10)

    # The counter can be moved forward explicitly.
    Person.id.set_next_value(1000)
    c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'})
    self.assertEqual(c['next'], 1000)

def test_sequence_field_get_next_value(self):
    """get_next_value peeks at the next id without consuming it."""
    class Person(Document):
        id = SequenceField(primary_key=True)
        name = StringField()

    self.db['mongoengine.counters'].drop()
    Person.drop_collection()

    for x in xrange(10):
        Person(name="Person %s" % x).save()

    self.assertEqual(Person.id.get_next_value(), 11)
    self.db['mongoengine.counters'].drop()
    # With the counter gone, numbering restarts at 1.
    self.assertEqual(Person.id.get_next_value(), 1)

    class Person(Document):
        # value_decorator converts allocated ids (here to str).
        id = SequenceField(primary_key=True, value_decorator=str)
        name = StringField()

    self.db['mongoengine.counters'].drop()
    Person.drop_collection()

    for x in xrange(10):
        Person(name="Person %s" % x).save()

    self.assertEqual(Person.id.get_next_value(), '11')
    self.db['mongoengine.counters'].drop()
    self.assertEqual(Person.id.get_next_value(), '1')

def test_sequence_field_sequence_name(self):
    """sequence_name overrides the counter document's id prefix."""
    class Person(Document):
        id = SequenceField(primary_key=True, sequence_name='jelly')
        name = StringField()

    self.db['mongoengine.counters'].drop()
    Person.drop_collection()

    for x in xrange(10):
        Person(name="Person %s" % x).save()

    # Counter lives under 'jelly.id' instead of 'person.id'.
    c = self.db['mongoengine.counters'].find_one({'_id': 'jelly.id'})
    self.assertEqual(c['next'], 10)

    ids = [i.id for i in Person.objects]
    self.assertEqual(ids, range(1, 11))

    c = self.db['mongoengine.counters'].find_one({'_id': 'jelly.id'})
    self.assertEqual(c['next'], 10)

    Person.id.set_next_value(1000)
    c = self.db['mongoengine.counters'].find_one({'_id': 'jelly.id'})
    self.assertEqual(c['next'], 1000)

def test_multiple_sequence_fields(self):
    """Two SequenceFields on one document keep independent counters."""
    class Person(Document):
        id = SequenceField(primary_key=True)
        counter = SequenceField()
        name = StringField()

    self.db['mongoengine.counters'].drop()
    Person.drop_collection()

    for x in xrange(10):
        Person(name="Person %s" % x).save()

    c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'})
    self.assertEqual(c['next'], 10)

    ids = [i.id for i in Person.objects]
    self.assertEqual(ids, range(1, 11))

    counters = [i.counter for i in Person.objects]
    self.assertEqual(counters, range(1, 11))

    c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'})
    self.assertEqual(c['next'], 10)

    Person.id.set_next_value(1000)
    c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'})
    self.assertEqual(c['next'], 1000)

    # Moving one counter does not affect the other.
    Person.counter.set_next_value(999)
    c = self.db['mongoengine.counters'].find_one({'_id': 'person.counter'})
    self.assertEqual(c['next'], 999)

def test_sequence_fields_reload(self):
    """Reloading must not re-allocate an already-assigned sequence value."""
    class Animal(Document):
        counter = SequenceField()
        name = StringField()

    self.db['mongoengine.counters'].drop()
    Animal.drop_collection()

    a = Animal(name="Boi").save()

    self.assertEqual(a.counter, 1)
    a.reload()
    self.assertEqual(a.counter, 1)

    # Clearing the value makes the field allocate a new one on access.
    a.counter = None
    self.assertEqual(a.counter, 2)
    a.save()

    self.assertEqual(a.counter, 2)

    a = Animal.objects.first()
    self.assertEqual(a.counter, 2)
    a.reload()
    self.assertEqual(a.counter, 2)

def test_multiple_sequence_fields_on_docs(self):
    """Each document class gets its own counter namespace."""
    class Animal(Document):
        id = SequenceField(primary_key=True)
        name = StringField()

    class Person(Document):
        id = SequenceField(primary_key=True)
        name = StringField()

    self.db['mongoengine.counters'].drop()
    Animal.drop_collection()
    Person.drop_collection()

    for x in xrange(10):
        Animal(name="Animal %s" % x).save()
        Person(name="Person %s" % x).save()

    c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'})
    self.assertEqual(c['next'], 10)

    c = self.db['mongoengine.counters'].find_one({'_id': 'animal.id'})
    self.assertEqual(c['next'], 10)

    ids = [i.id for i in Person.objects]
    self.assertEqual(ids, range(1, 11))

    id = [i.id for i in Animal.objects]
    self.assertEqual(id, range(1, 11))

    c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'})
    self.assertEqual(c['next'], 10)

    c = self.db['mongoengine.counters'].find_one({'_id': 'animal.id'})
    self.assertEqual(c['next'], 10)

def test_sequence_field_value_decorator(self):
    """value_decorator transforms every allocated id (here int -> str)."""
    class Person(Document):
        id = SequenceField(primary_key=True, value_decorator=str)
        name = StringField()

    self.db['mongoengine.counters'].drop()
    Person.drop_collection()

    for x in xrange(10):
        p = Person(name="Person %s" % x)
        p.save()

    c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'})
    self.assertEqual(c['next'], 10)

    ids = [i.id for i in Person.objects]
    self.assertEqual(ids, map(str, range(1, 11)))

    c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'})
    self.assertEqual(c['next'], 10)

def test_embedded_sequence_field(self):
    """SequenceFields work inside embedded documents and share one counter."""
    class Comment(EmbeddedDocument):
        id = SequenceField()
        content = StringField(required=True)

    class Post(Document):
        title = StringField(required=True)
        comments = ListField(EmbeddedDocumentField(Comment))

    self.db['mongoengine.counters'].drop()
    Post.drop_collection()

    Post(title="MongoEngine",
         comments=[Comment(content="NoSQL Rocks"),
                   Comment(content="MongoEngine Rocks")]).save()

    c = self.db['mongoengine.counters'].find_one({'_id': 'comment.id'})
    self.assertEqual(c['next'], 2)
    post = Post.objects.first()
    self.assertEqual(1, post.comments[0].id)
    self.assertEqual(2, post.comments[1].id)
def test_generic_embedded_document(self):
    """A GenericEmbeddedDocumentField accepts any EmbeddedDocument type."""
    class Car(EmbeddedDocument):
        name = StringField()

    class Dish(EmbeddedDocument):
        food = StringField(required=True)
        number = IntField()

    class Person(Document):
        name = StringField()
        like = GenericEmbeddedDocumentField()

    Person.drop_collection()

    person = Person(name='Test User')
    person.like = Car(name='Fiat')
    person.save()

    person = Person.objects.first()
    self.assertTrue(isinstance(person.like, Car))

    # The field can be re-assigned to a different embedded type.
    person.like = Dish(food="arroz", number=15)
    person.save()

    person = Person.objects.first()
    self.assertTrue(isinstance(person.like, Dish))

def test_generic_embedded_document_choices(self):
    """Ensure you can limit GenericEmbeddedDocument choices
    """
    class Car(EmbeddedDocument):
        name = StringField()

    class Dish(EmbeddedDocument):
        food = StringField(required=True)
        number = IntField()

    class Person(Document):
        name = StringField()
        # Only Dish is an allowed embedded type.
        like = GenericEmbeddedDocumentField(choices=(Dish,))

    Person.drop_collection()

    person = Person(name='Test User')
    person.like = Car(name='Fiat')
    self.assertRaises(ValidationError, person.validate)

    person.like = Dish(food="arroz", number=15)
    person.save()

    person = Person.objects.first()
    self.assertTrue(isinstance(person.like, Dish))

def test_generic_list_embedded_document_choices(self):
    """Ensure you can limit GenericEmbeddedDocument choices inside a list
    field
    """
    class Car(EmbeddedDocument):
        name = StringField()

    class Dish(EmbeddedDocument):
        food = StringField(required=True)
        number = IntField()

    class Person(Document):
        name = StringField()
        likes = ListField(GenericEmbeddedDocumentField(choices=(Dish,)))

    Person.drop_collection()

    person = Person(name='Test User')
    # List items are validated against the choices as well.
    person.likes = [Car(name='Fiat')]
    self.assertRaises(ValidationError, person.validate)

    person.likes = [Dish(food="arroz", number=15)]
    person.save()

    person = Person.objects.first()
    self.assertTrue(isinstance(person.likes[0], Dish))
def test_recursive_validation(self):
    """Ensure that a validation result to_dict is available.

    Errors from nested embedded documents must be reachable both through
    the ``errors`` property and the flattened ``to_dict()`` form.
    """
    class Author(EmbeddedDocument):
        name = StringField(required=True)

    class Comment(EmbeddedDocument):
        author = EmbeddedDocumentField(Author, required=True)
        content = StringField(required=True)

    class Post(Document):
        title = StringField(required=True)
        comments = ListField(EmbeddedDocumentField(Comment))

    bob = Author(name='Bob')
    post = Post(title='hello world')
    post.comments.append(Comment(content='hello', author=bob))
    # Second comment is missing its required content field.
    post.comments.append(Comment(author=bob))

    self.assertRaises(ValidationError, post.validate)
    try:
        post.validate()
    # Python-3-compatible syntax (valid since 2.6); the old
    # `except ValidationError, error` form is a SyntaxError on Python 3.
    except ValidationError as error:
        # ValidationError.errors property
        self.assertTrue(hasattr(error, 'errors'))
        self.assertTrue(isinstance(error.errors, dict))
        self.assertTrue('comments' in error.errors)
        self.assertTrue(1 in error.errors['comments'])
        self.assertTrue(isinstance(error.errors['comments'][1]['content'],
                        ValidationError))

        # ValidationError.schema property
        error_dict = error.to_dict()
        self.assertTrue(isinstance(error_dict, dict))
        self.assertTrue('comments' in error_dict)
        self.assertTrue(1 in error_dict['comments'])
        self.assertTrue('content' in error_dict['comments'][1])
        self.assertEqual(error_dict['comments'][1]['content'],
                         u'Field is required')

    # Fixing the missing field makes the document valid.
    post.comments[1].content = 'here we go'
    post.validate()
def test_email_field(self):
    """EmailField accepts valid addresses and rejects malformed ones."""
    class User(Document):
        email = EmailField()

    user = User(email="ross@example.com")
    self.assertTrue(user.validate() is None)

    user = User(email="ross@example.co.uk")
    self.assertTrue(user.validate() is None)

    # Very long local part is still valid.
    user = User(email=("Kofq@rhom0e4klgauOhpbpNdogawnyIKvQS0wk2mjqrgGQ5S"
                       "ucictfqpdkK9iS1zeFw8sg7s7cwAF7suIfUfeyueLpfosjn3"
                       "aJIazqqWkm7.net"))
    self.assertTrue(user.validate() is None)

    # New-style long TLDs are accepted.
    user = User(email="new-tld@example.technology")
    self.assertTrue(user.validate() is None)

    # No TLD at all is rejected.
    user = User(email='me@localhost')
    self.assertRaises(ValidationError, user.validate)

    # Trailing dot is rejected.
    user = User(email="ross@example.com.")
    self.assertRaises(ValidationError, user.validate)

def test_email_field_honors_regex(self):
    """An explicit regex further restricts which addresses validate."""
    class User(Document):
        email = EmailField(regex=r'\w+@example.com')

    # Fails regex validation
    user = User(email='me@foo.com')
    self.assertRaises(ValidationError, user.validate)

    # Passes regex validation
    user = User(email='me@example.com')
    self.assertTrue(user.validate() is None)

def test_tuples_as_tuples(self):
    """
    Ensure that tuples remain tuples when they are
    inside a ComplexBaseField
    """
    from mongoengine.base import BaseField

    class EnumField(BaseField):

        def __init__(self, **kwargs):
            super(EnumField, self).__init__(**kwargs)

        def to_mongo(self, value):
            return value

        def to_python(self, value):
            # Force tuple conversion on read.
            return tuple(value)

    class TestDoc(Document):
        items = ListField(EnumField())

    TestDoc.drop_collection()
    tuples = [(100, 'Testing')]
    doc = TestDoc()
    doc.items = tuples
    doc.save()
    x = TestDoc.objects().get()
    self.assertTrue(x is not None)
    self.assertTrue(len(x.items) == 1)
    # Both the coerced and the raw item must compare equal to the tuple.
    self.assertTrue(tuple(x.items[0]) in tuples)
    self.assertTrue(x.items[0] in tuples)
def test_dynamic_fields_class(self):
    """A DynamicField holding a Document stores a reference-like value."""
    class Doc2(Document):
        field_1 = StringField(db_field='f')

    class Doc(Document):
        my_id = IntField(required=True, unique=True, primary_key=True)
        embed_me = DynamicField(db_field='e')
        field_x = StringField(db_field='x')

    Doc.drop_collection()
    Doc2.drop_collection()

    doc2 = Doc2(field_1="hello")
    doc = Doc(my_id=1, embed_me=doc2, field_x="x")
    # Referencing an unsaved document must be rejected.
    self.assertRaises(OperationError, doc.save)

    doc2.save()
    doc.save()

    doc = Doc.objects.get()
    self.assertEqual(doc.embed_me.field_1, "hello")

def test_dynamic_fields_embedded_class(self):
    """A DynamicField can hold an EmbeddedDocument directly."""
    class Embed(EmbeddedDocument):
        field_1 = StringField(db_field='f')

    class Doc(Document):
        my_id = IntField(required=True, unique=True, primary_key=True)
        embed_me = DynamicField(db_field='e')
        field_x = StringField(db_field='x')

    Doc.drop_collection()

    Doc(my_id=1, embed_me=Embed(field_1="hello"), field_x="x").save()

    doc = Doc.objects.get()
    self.assertEqual(doc.embed_me.field_1, "hello")

def test_invalid_dict_value(self):
    """Non-dict values assigned to a DictField must fail validation."""
    class DictFieldTest(Document):
        dictionary = DictField(required=True)

    DictFieldTest.drop_collection()

    test = DictFieldTest(dictionary=None)
    test.dictionary  # Just access to test getter
    self.assertRaises(ValidationError, test.validate)

    test = DictFieldTest(dictionary=False)
    test.dictionary  # Just access to test getter
    self.assertRaises(ValidationError, test.validate)
def test_cls_field(self):
    """_cls stores the full inheritance path and is queryable with __in."""
    class Animal(Document):
        meta = {'allow_inheritance': True}

    class Fish(Animal):
        pass

    class Mammal(Animal):
        pass

    class Dog(Mammal):
        pass

    class Human(Mammal):
        pass

    Animal.objects.delete()
    Dog().save()
    Fish().save()
    Human().save()
    # assertEqual instead of the deprecated assertEquals alias, matching
    # the rest of this suite.
    self.assertEqual(
        Animal.objects(_cls__in=["Animal.Mammal.Dog", "Animal.Fish"]).count(), 2)
    # A path that matches no saved document returns nothing.
    self.assertEqual(
        Animal.objects(_cls__in=["Animal.Fish.Guppy"]).count(), 0)
def test_sparse_field(self):
    """A unique+sparse field must allow multiple documents without it."""
    class Doc(Document):
        name = StringField(required=False, unique=True, sparse=True)

    try:
        # Two docs with no `name`: the sparse unique index must not
        # treat the missing values as duplicates.
        Doc().save()
        Doc().save()
    except Exception as e:
        # Surface the underlying error instead of failing silently.
        self.fail("sparse unique field rejected missing values: %s" % e)
def test_undefined_field_exception(self):
    """Tests if a `FieldDoesNotExist` exception is raised when trying to
    instanciate a document with a field that's not defined.
    """
    class Doc(Document):
        foo = StringField(db_field='f')

    def test():
        # 'bar' is not a declared field on Doc.
        Doc(bar='test')

    self.assertRaises(FieldDoesNotExist, test)

def test_undefined_field_exception_with_strict(self):
    """Tests if a `FieldDoesNotExist` exception is raised when trying to
    instanciate a document with a field that's not defined,
    even when strict is set to False.
    """
    class Doc(Document):
        foo = StringField(db_field='f')
        meta = {'strict': False}

    def test():
        Doc(bar='test')

    self.assertRaises(FieldDoesNotExist, test)
class EmbeddedDocumentListFieldTestCase(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        # One shared connection and one shared pair of document classes
        # for the whole test case.
        cls.db = connect(db='EmbeddedDocumentListFieldTestCase')

        class Comments(EmbeddedDocument):
            author = StringField()
            message = StringField()

        class BlogPost(Document):
            comments = EmbeddedDocumentListField(Comments)

        cls.Comments = Comments
        cls.BlogPost = BlogPost

    def setUp(self):
        """
        Create two BlogPost entries in the database, each with
        several EmbeddedDocuments.
        """
        self.post1 = self.BlogPost(comments=[
            self.Comments(author='user1', message='message1'),
            self.Comments(author='user2', message='message1')
        ]).save()

        self.post2 = self.BlogPost(comments=[
            self.Comments(author='user2', message='message2'),
            self.Comments(author='user2', message='message3'),
            self.Comments(author='user3', message='message1')
        ]).save()

    def tearDown(self):
        # Reset state between tests; the database itself is dropped once
        # in tearDownClass.
        self.BlogPost.drop_collection()

    @classmethod
    def tearDownClass(cls):
        cls.db.drop_database('EmbeddedDocumentListFieldTestCase')
def test_no_keyword_filter(self):
    """
    Tests the filter method of a List of Embedded Documents
    with a no keyword.
    """
    filtered = self.post1.comments.filter()

    # Ensure nothing was changed
    # < 2.6 Incompatible >
    # self.assertListEqual(filtered, self.post1.comments)
    self.assertEqual(filtered, self.post1.comments)

def test_single_keyword_filter(self):
    """
    Tests the filter method of a List of Embedded Documents
    with a single keyword.
    """
    filtered = self.post1.comments.filter(author='user1')

    # Ensure only 1 entry was returned.
    self.assertEqual(len(filtered), 1)

    # Ensure the entry returned is the correct entry.
    self.assertEqual(filtered[0].author, 'user1')

def test_multi_keyword_filter(self):
    """
    Tests the filter method of a List of Embedded Documents
    with multiple keywords.
    """
    filtered = self.post2.comments.filter(
        author='user2', message='message2'
    )

    # Ensure only 1 entry was returned.
    self.assertEqual(len(filtered), 1)

    # Ensure the entry returned is the correct entry.
    self.assertEqual(filtered[0].author, 'user2')
    self.assertEqual(filtered[0].message, 'message2')

def test_chained_filter(self):
    """
    Tests chained filter methods of a List of Embedded Documents
    """
    filtered = self.post2.comments.filter(author='user2').filter(
        message='message2'
    )

    # Ensure only 1 entry was returned.
    self.assertEqual(len(filtered), 1)

    # Ensure the entry returned is the correct entry.
    self.assertEqual(filtered[0].author, 'user2')
    self.assertEqual(filtered[0].message, 'message2')

def test_unknown_keyword_filter(self):
    """
    Tests the filter method of a List of Embedded Documents
    when the keyword is not a known keyword.
    """
    # < 2.6 Incompatible >
    # with self.assertRaises(AttributeError):
    #     self.post2.comments.filter(year=2)
    self.assertRaises(AttributeError, self.post2.comments.filter, year=2)
def test_no_keyword_exclude(self):
"""
Tests the exclude method of a List of Embedded Documents
with a no keyword.
"""
filtered = self.post1.comments.exclude()
# Ensure everything was removed
# < 2.6 Incompatible >
# self.assertListEqual(filtered, [])
self.assertEqual(filtered, [])
def test_single_keyword_exclude(self):
"""
Tests the exclude method of a List of Embedded Documents
with a single keyword.
"""
excluded = self.post1.comments.exclude(author='user1')
# Ensure only 1 entry was returned.
self.assertEqual(len(excluded), 1)
# Ensure the entry returned is the correct entry.
self.assertEqual(excluded[0].author, 'user2')
def test_multi_keyword_exclude(self):
"""
Tests the exclude method of a List of Embedded Documents
with multiple keywords.
"""
excluded = self.post2.comments.exclude(
author='user3', message='message1'
)
# Ensure only 2 entries were returned.
self.assertEqual(len(excluded), 2)
# Ensure the entries returned are the correct entries.
self.assertEqual(excluded[0].author, 'user2')
self.assertEqual(excluded[1].author, 'user2')
def test_non_matching_exclude(self):
"""
Tests the exclude method of a List of Embedded Documents
when the keyword does not match any entries.
"""
excluded = self.post2.comments.exclude(author='user4')
# Ensure the 3 entries still exist.
self.assertEqual(len(excluded), 3)
def test_unknown_keyword_exclude(self):
"""
Tests the exclude method of a List of Embedded Documents
when the keyword is not a known keyword.
"""
# < 2.6 Incompatible >
# with self.assertRaises(AttributeError):
# self.post2.comments.exclude(year=2)
self.assertRaises(AttributeError, self.post2.comments.exclude, year=2)
def test_chained_filter_exclude(self):
"""
Tests the exclude method after a filter method of a List of
Embedded Documents.
"""
excluded = self.post2.comments.filter(author='user2').exclude(
message='message2'
)
# Ensure only 1 entry was returned.
self.assertEqual(len(excluded), 1)
# Ensure the entry returned is the correct entry.
self.assertEqual(excluded[0].author, 'user2')
self.assertEqual(excluded[0].message, 'message3')
def test_count(self):
"""
Tests the count method of a List of Embedded Documents.
"""
self.assertEqual(self.post1.comments.count(), 2)
self.assertEqual(self.post1.comments.count(), len(self.post1.comments))
def test_filtered_count(self):
"""
Tests the filter + count method of a List of Embedded Documents.
"""
count = self.post1.comments.filter(author='user1').count()
self.assertEqual(count, 1)
def test_single_keyword_get(self):
"""
Tests the get method of a List of Embedded Documents using a
single keyword.
"""
comment = self.post1.comments.get(author='user1')
# < 2.6 Incompatible >
# self.assertIsInstance(comment, self.Comments)
self.assertTrue(isinstance(comment, self.Comments))
self.assertEqual(comment.author, 'user1')
def test_multi_keyword_get(self):
"""
Tests the get method of a List of Embedded Documents using
multiple keywords.
"""
comment = self.post2.comments.get(author='user2', message='message2')
# < 2.6 Incompatible >
# self.assertIsInstance(comment, self.Comments)
self.assertTrue(isinstance(comment, self.Comments))
self.assertEqual(comment.author, 'user2')
self.assertEqual(comment.message, 'message2')
def test_no_keyword_multiple_return_get(self):
"""
Tests the get method of a List of Embedded Documents without
a keyword to return multiple documents.
"""
# < 2.6 Incompatible >
# with self.assertRaises(MultipleObjectsReturned):
# self.post1.comments.get()
self.assertRaises(MultipleObjectsReturned, self.post1.comments.get)
def test_keyword_multiple_return_get(self):
"""
Tests the get method of a List of Embedded Documents with a keyword
to return multiple documents.
"""
# < 2.6 Incompatible >
# with self.assertRaises(MultipleObjectsReturned):
# self.post2.comments.get(author='user2')
self.assertRaises(
MultipleObjectsReturned, self.post2.comments.get, author='user2'
)
def test_unknown_keyword_get(self):
"""
Tests the get method of a List of Embedded Documents with an
unknown keyword.
"""
# < 2.6 Incompatible >
# with self.assertRaises(AttributeError):
# self.post2.comments.get(year=2020)
self.assertRaises(AttributeError, self.post2.comments.get, year=2020)
def test_no_result_get(self):
"""
Tests the get method of a List of Embedded Documents where get
returns no results.
"""
# < 2.6 Incompatible >
# with self.assertRaises(DoesNotExist):
# self.post1.comments.get(author='user3')
self.assertRaises(
DoesNotExist, self.post1.comments.get, author='user3'
)
def test_first(self):
"""
Tests the first method of a List of Embedded Documents to
ensure it returns the first comment.
"""
comment = self.post1.comments.first()
# Ensure a Comment object was returned.
# < 2.6 Incompatible >
# self.assertIsInstance(comment, self.Comments)
self.assertTrue(isinstance(comment, self.Comments))
self.assertEqual(comment, self.post1.comments[0])
def test_create(self):
"""
Test the create method of a List of Embedded Documents.
"""
comment = self.post1.comments.create(
author='user4', message='message1'
)
self.post1.save()
# Ensure the returned value is the comment object.
# < 2.6 Incompatible >
# self.assertIsInstance(comment, self.Comments)
self.assertTrue(isinstance(comment, self.Comments))
self.assertEqual(comment.author, 'user4')
self.assertEqual(comment.message, 'message1')
# Ensure the new comment was actually saved to the database.
# < 2.6 Incompatible >
# self.assertIn(
# comment,
# self.BlogPost.objects(comments__author='user4')[0].comments
# )
self.assertTrue(
comment in self.BlogPost.objects(
comments__author='user4'
)[0].comments
)
def test_filtered_create(self):
"""
Test the create method of a List of Embedded Documents chained
to a call to the filter method. Filtering should have no effect
on creation.
"""
comment = self.post1.comments.filter(author='user1').create(
author='user4', message='message1'
)
self.post1.save()
# Ensure the returned value is the comment object.
# < 2.6 Incompatible >
# self.assertIsInstance(comment, self.Comments)
self.assertTrue(isinstance(comment, self.Comments))
self.assertEqual(comment.author, 'user4')
self.assertEqual(comment.message, 'message1')
# Ensure the new comment was actually saved to the database.
# < 2.6 Incompatible >
# self.assertIn(
# comment,
# self.BlogPost.objects(comments__author='user4')[0].comments
# )
self.assertTrue(
comment in self.BlogPost.objects(
comments__author='user4'
)[0].comments
)
def test_no_keyword_update(self):
"""
Tests the update method of a List of Embedded Documents with
no keywords.
"""
original = list(self.post1.comments)
number = self.post1.comments.update()
self.post1.save()
# Ensure that nothing was altered.
# < 2.6 Incompatible >
# self.assertIn(
# original[0],
# self.BlogPost.objects(id=self.post1.id)[0].comments
# )
self.assertTrue(
original[0] in self.BlogPost.objects(id=self.post1.id)[0].comments
)
# < 2.6 Incompatible >
# self.assertIn(
# original[1],
# self.BlogPost.objects(id=self.post1.id)[0].comments
# )
self.assertTrue(
original[1] in self.BlogPost.objects(id=self.post1.id)[0].comments
)
# Ensure the method returned 0 as the number of entries
# modified
self.assertEqual(number, 0)
def test_single_keyword_update(self):
"""
Tests the update method of a List of Embedded Documents with
a single keyword.
"""
number = self.post1.comments.update(author='user4')
self.post1.save()
comments = self.BlogPost.objects(id=self.post1.id)[0].comments
# Ensure that the database was updated properly.
self.assertEqual(comments[0].author, 'user4')
self.assertEqual(comments[1].author, 'user4')
# Ensure the method returned 2 as the number of entries
# modified
self.assertEqual(number, 2)
def test_save(self):
"""
Tests the save method of a List of Embedded Documents.
"""
comments = self.post1.comments
new_comment = self.Comments(author='user4')
comments.append(new_comment)
comments.save()
# Ensure that the new comment has been added to the database.
# < 2.6 Incompatible >
# self.assertIn(
# new_comment,
# self.BlogPost.objects(id=self.post1.id)[0].comments
# )
self.assertTrue(
new_comment in self.BlogPost.objects(id=self.post1.id)[0].comments
)
def test_delete(self):
"""
Tests the delete method of a List of Embedded Documents.
"""
number = self.post1.comments.delete()
self.post1.save()
# Ensure that all the comments under post1 were deleted in the
# database.
# < 2.6 Incompatible >
# self.assertListEqual(
# self.BlogPost.objects(id=self.post1.id)[0].comments, []
# )
self.assertEqual(
self.BlogPost.objects(id=self.post1.id)[0].comments, []
)
# Ensure that post1 comments were deleted from the list.
# < 2.6 Incompatible >
# self.assertListEqual(self.post1.comments, [])
self.assertEqual(self.post1.comments, [])
# Ensure that comments still returned a EmbeddedDocumentList object.
# < 2.6 Incompatible >
# self.assertIsInstance(self.post1.comments, EmbeddedDocumentList)
self.assertTrue(isinstance(self.post1.comments, EmbeddedDocumentList))
# Ensure that the delete method returned 2 as the number of entries
# deleted from the database
self.assertEqual(number, 2)
def test_empty_list_embedded_documents_with_unique_field(self):
"""
Tests that only one document with an empty list of embedded documents
that have a unique field can be saved, but if the unique field is
also sparse than multiple documents with an empty list can be saved.
"""
class EmbeddedWithUnique(EmbeddedDocument):
number = IntField(unique=True)
class A(Document):
my_list = ListField(EmbeddedDocumentField(EmbeddedWithUnique))
a1 = A(my_list=[]).save()
self.assertRaises(NotUniqueError, lambda: A(my_list=[]).save())
class EmbeddedWithSparseUnique(EmbeddedDocument):
number = IntField(unique=True, sparse=True)
class B(Document):
my_list = ListField(EmbeddedDocumentField(EmbeddedWithSparseUnique))
b1 = B(my_list=[]).save()
b2 = B(my_list=[]).save()
def test_filtered_delete(self):
"""
Tests the delete method of a List of Embedded Documents
after the filter method has been called.
"""
comment = self.post1.comments[1]
number = self.post1.comments.filter(author='user2').delete()
self.post1.save()
# Ensure that only the user2 comment was deleted.
# < 2.6 Incompatible >
# self.assertNotIn(
# comment, self.BlogPost.objects(id=self.post1.id)[0].comments
# )
self.assertTrue(
comment not in self.BlogPost.objects(id=self.post1.id)[0].comments
)
self.assertEqual(
len(self.BlogPost.objects(id=self.post1.id)[0].comments), 1
)
# Ensure that the user2 comment no longer exists in the list.
# < 2.6 Incompatible >
# self.assertNotIn(comment, self.post1.comments)
self.assertTrue(comment not in self.post1.comments)
self.assertEqual(len(self.post1.comments), 1)
# Ensure that the delete method returned 1 as the number of entries
# deleted from the database
self.assertEqual(number, 1)
# Allow this test module to be executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "59f39424a922b2381db7592ed863b6e5",
"timestamp": "",
"source": "github",
"line_count": 3755,
"max_line_length": 113,
"avg_line_length": 32.20825565912117,
"alnum_prop": 0.5828082882704106,
"repo_name": "starsirius/mongoengine",
"id": "fd083c7306a165a51a819a3271d9ece90eed82fc",
"size": "120968",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/fields/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "995922"
}
],
"symlink_target": ""
} |
"""
BaseModel for database models
"""
from peewee import Model, SqliteDatabase
DATABASE = SqliteDatabase('/var/www/sourcemon/sourcemon.db')
class BaseModel(Model):
    """
    BaseModel for database models: binds all subclasses to the shared
    module-level SQLite database.
    """
    def database(self):
        """
        Returns database connection
        """
        # NOTE(review): peewee already exposes the bound connection through
        # ``Meta.database``; this accessor looks redundant — confirm callers
        # before removing it.
        return DATABASE
    class Meta:
        """
        Meta
        """
        # Every model inheriting BaseModel uses this SQLite connection.
        database = DATABASE
| {
"content_hash": "d3a76ee8adcf1a0b83e2c5dbfa3d67fe",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 60,
"avg_line_length": 18.391304347826086,
"alnum_prop": 0.5602836879432624,
"repo_name": "michaelimfeld/sourcemon",
"id": "85868155ecd704ef80ba07688b273e15761b0def",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sourcemon/model/basemodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5421"
},
{
"name": "JavaScript",
"bytes": "4156"
},
{
"name": "Makefile",
"bytes": "651"
},
{
"name": "Python",
"bytes": "4805"
}
],
"symlink_target": ""
} |
import datetime
import json
import queue
import decimal
import uuid
import inspect
import distutils.util
import time
import argparse
import boto3
import ibapi.wrapper
from botocore.exceptions import ClientError
from ibapi import (comm)
from ibapi.client import EClient
from ibapi.common import *
from ibapi.contract import *
from ibapi.contract import Contract
from ibapi.errors import *
from ibapi.ticktype import TickType, TickTypeEnum
from ibapi.utils import *
from ibapi.utils import (BadMessage)
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that serializes ``decimal.Decimal`` values (as returned
    by DynamoDB) — integral decimals as ``int``, fractional ones as
    ``float``. Everything else falls through to the default encoder.
    """
    def default(self, o):
        if isinstance(o, decimal.Decimal):
            # Bug fix: the previous test ``o % 1 > 0`` mishandled negative
            # fractional values, because Decimal's ``%`` keeps the sign of
            # the dividend (Decimal('-1.5') % 1 == Decimal('-0.5'), which is
            # not > 0), silently truncating -1.5 to -1. Comparing against
            # the integral value is sign-safe.
            if o == o.to_integral_value():
                return int(o)
            return float(o)
        return super(DecimalEncoder, self).default(o)
class Utils(object):
    """Namespace for small static helpers."""
    def __init__(self):
        pass
    @staticmethod
    def reliable(func):
        """Decorator for instance methods that retries on a ``None`` result.

        The wrapped call is retried up to 10 times with exponential back-off
        (2, 4, ... 1024 seconds) while it keeps returning ``None``; the last
        result is returned (and may still be ``None`` if all retries fail).
        """
        from functools import wraps

        @wraps(func)  # preserve the wrapped function's name/docstring
        def _decorator(self, *args, **kwargs):
            tries = 0
            result = func(self, *args, **kwargs)
            # The redundant outer ``if result is None`` guard was removed;
            # the while-condition covers it.
            while result is None and tries < 10:
                tries += 1
                time.sleep(2 ** tries)
                result = func(self, *args, **kwargs)
            return result
        return _decorator
class InterruptableClient(EClient):
    """EClient variant whose message loop invokes a caller-supplied callback
    whenever the inbound queue has been idle for more than 30 seconds."""
    def __init__(self):
        EClient.__init__(self, self)
        # Timestamp of the last idle-callback invocation.
        self.lastStamp = datetime.datetime.utcnow()
    def runnable(self, func):
        """This is the function that has the message loop."""
        try:
            while not self.done and (self.conn.isConnected()
                    or not self.msg_queue.empty()):
                try:
                    try:
                        text = self.msg_queue.get(block=True, timeout=0.2)
                        if len(text) > MAX_MSG_LEN:
                            self.wrapper.error(NO_VALID_ID, BAD_LENGTH.code(),
                                "%s:%d:%s" % (BAD_LENGTH.msg(), len(text), text))
                            self.disconnect()
                            break
                    except queue.Empty:
                        # Queue idle: fire the periodic callback at most
                        # once every 30 seconds.
                        if datetime.datetime.utcnow() - self.lastStamp > datetime.timedelta(seconds=30):
                            func()
                            self.lastStamp = datetime.datetime.utcnow()
                        logging.debug("queue.get: empty")
                    else:
                        # Normal path: decode and dispatch the IB message.
                        fields = comm.read_fields(text)
                        logging.debug("fields %s", fields)
                        self.decoder.interpret(fields)
                except (KeyboardInterrupt, SystemExit):
                    logging.info("detected KeyboardInterrupt, SystemExit")
                    self.keyboardInterrupt()
                    self.keyboardInterruptHard()
                except BadMessage:
                    logging.info("BadMessage")
                    self.conn.disconnect()
                logging.debug("conn:%d queue.sz:%d",
                              self.conn.isConnected(),
                              self.msg_queue.qsize())
        finally:
            self.disconnect()
class CloudLogger(object):
    """Dual-destination logger: local ``logging`` plus an AWS CloudWatch
    Logs stream created at construction time."""
    def __init__(self):
        self.__fileLogger = logging.getLogger()
        self.__fileLogger.setLevel(logging.INFO)
        logging.basicConfig(format='%(asctime)s - %(levelname)s - %(threadName)s - %(message)s')
        self.__cloudWatchLogger = boto3.client('logs')
        self.__groupName = '/aws/docker/Capsule'
        # put_log_events requires the previous call's sequence token.
        self.__sequenceToken = None
        # Stream name = "YYYY/MM/DD/[$LATEST]" + random hex suffix.
        self.__stream = (datetime.datetime.today().strftime('%Y/%m/%d/[$LATEST]'), uuid.uuid4().hex)
        response = self.__cloudWatchLogger.create_log_stream(
            logGroupName=self.__groupName,
            logStreamName='%s%s' % self.__stream
        )
        self.info('LogStream Created: %s' % response)
    def __logToStream(self, msg):
        # NOTE(review): the token-less first put prepends an extra timestamp
        # to the message while subsequent puts do not — confirm whether this
        # asymmetry is intentional.
        if self.__sequenceToken is None:
            response = self.__cloudWatchLogger\
                .put_log_events(logGroupName=self.__groupName, logStreamName='%s%s' % self.__stream,
                                logEvents=[dict(timestamp=int(round(time.time() * 1000)),
                                                message=time.strftime("%m/%d/%Y %H:%M:%S") + msg)])
        else:
            response = self.__cloudWatchLogger\
                .put_log_events(logGroupName=self.__groupName, logStreamName='%s%s' % self.__stream,
                                logEvents=[dict(timestamp=int(round(time.time() * 1000)),
                                                message=msg)],
                                sequenceToken=self.__sequenceToken)
        self.__sequenceToken = response['nextSequenceToken']
    def info(self, msg):
        # Level tag is derived from the current method name ("INFO" etc).
        self.__fileLogger.info(msg)
        name = inspect.getframeinfo(inspect.currentframe()).function.upper()
        self.__logToStream('%s [%s] %s' % (time.strftime("%m/%d/%Y %H:%M:%S"), name, msg))
    def debug(self, msg):
        self.__fileLogger.debug(msg)
        name = inspect.getframeinfo(inspect.currentframe()).function.upper()
        self.__logToStream('%s [%s] %s' % (time.strftime("%m/%d/%Y %H:%M:%S"), name, msg))
    def warning(self, msg):
        self.__fileLogger.warning(msg)
        name = inspect.getframeinfo(inspect.currentframe()).function.upper()
        self.__logToStream('%s [%s] %s' % (time.strftime("%m/%d/%Y %H:%M:%S"), name, msg))
    def error(self, msg):
        self.__fileLogger.error(msg)
        name = inspect.getframeinfo(inspect.currentframe()).function.upper()
        self.__logToStream('%s [%s] %s' % (time.strftime("%m/%d/%Y %H:%M:%S"), name, msg))
class IbApp(InterruptableClient, ibapi.wrapper.EWrapper):
    """IB API client/wrapper pair that loads instruments from the DynamoDB
    'Securities' table, validates contracts, optionally subscribes to
    real-time and/or historical market data, and writes EOD bars to the
    'Quotes.EOD' DynamoDB table."""
    def __init__(self, real, hist):
        # real/hist: whether to subscribe to real-time / historical data.
        self.__subRealMD = real
        self.__subHistMD = hist
        self.Logger = CloudLogger()
        InterruptableClient.__init__(self)
        self.nextValidOrderId = None
        self.nextValidReqId = None
        # reqId -> contract bookkeeping for each outstanding request type.
        self.requestedContracts = {}
        self.requestedMarketData = {}
        self.requestedHistoricalData = {}
        # reqId -> localSymbol, used when ticks/bars come back.
        self.marketDataLookup = {}
        self.historicalLookup = {}
        db = boto3.resource('dynamodb', region_name='us-east-1')
        self.__Securities = db.Table('Securities')
        self.__QuotesEod = db.Table('Quotes.EOD')
    def __del__(self):
        self.disconnect()
    @Utils.reliable
    def getSecurities(self):
        """Scan the Securities table; retried by @Utils.reliable on None."""
        try:
            self.Logger.info('Calling securities scan ...')
            response = self.__Securities.scan()
        except ClientError as e:
            self.Logger.error(e.response['Error']['Message'])
            return None
        except Exception as e:
            self.Logger.error(e)
            return None
        else:
            # self.Logger.info(json.dumps(security, indent=4, cls=DecimalEncoder))
            if 'Items' in response:
                return response['Items']
    def UpdateQuote(self, symbol, date, opn, close, high, low, volume, barCount):
        """Upsert one EOD bar into Quotes.EOD, keyed by (Symbol, Date)."""
        try:
            details = {"Open": decimal.Decimal(str(opn)), "Close": decimal.Decimal(str(close)),
                       "High": decimal.Decimal(str(high)), "Low": decimal.Decimal(str(low)),
                       "Volume": volume, "Count": barCount}
            response = self.__QuotesEod.update_item(
                Key={
                    'Symbol': symbol,
                    'Date': date,
                },
                UpdateExpression="set #d = :d, #s = :s",
                ExpressionAttributeNames={
                    '#d': 'Details',
                    '#s': 'Source',
                },
                ExpressionAttributeValues={
                    ':d': details,
                    ':s': 'IB',
                },
                ReturnValues="UPDATED_NEW")
        except ClientError as e:
            self.Logger.error(e.response['Error']['Message'])
        except Exception as e:
            self.Logger.error(e)
        else:
            self.Logger.debug(json.dumps(response, indent=4, cls=DecimalEncoder))
    def verify(self):
        """Idle callback (see InterruptableClient.runnable): re-issues any
        requests that have not yet been answered and removed from the
        requested* dicts."""
        self.Logger.info('requesting server time')
        self.reqCurrentTime()
        for key, value in self.requestedContracts.items():
            self.reqContractDetails(key, value)
            self.Logger.info('re-requesting contract details for: %s' % value.symbol)
        if self.__subHistMD:
            for key, value in self.requestedHistoricalData.items():
                self.reqHistoricalData(key, value, '', "2 D", "1 day", "TRADES", 1, 1, False, list("XYZ"))
                self.Logger.info('re-requesting Historical Data for: %s' % value.symbol)
        if self.__subRealMD:
            for key, value in self.requestedMarketData.items():
                self.reqMktData(key, value, "", True, False, [])
                self.Logger.info('re-requesting Market Data for: %s' % value.symbol)
    def loop(self):
        # Run the message loop with verify() as the periodic idle callback.
        self.runnable(self.verify)
    def start(self):
        """Request contract details for every subscription-enabled security."""
        self.Logger.info('start for read data %s and historical %s' % (self.__subRealMD, self.__subHistMD))
        items = self.getSecurities()
        for sec in items:
            if sec['SubscriptionEnabled']:
                contract = Contract()
                contract.symbol = sec['Symbol']
                contract.secType = sec['ProductType']
                contract.exchange = sec['Description']['Exchange']
                if contract.secType == 'FUT':
                    contract.tradingClass = sec['Symbol']
                rId = self.nextReqId()
                self.requestedContracts[rId] = contract
                self.reqContractDetails(rId, contract)
    def nextReqId(self):
        # Monotonically increasing request id, seeded by nextValidId().
        reqId = self.nextValidReqId
        self.nextValidReqId += 1
        return reqId
    def nextOrderId(self):
        orderId = self.nextValidOrderId
        self.nextValidOrderId += 1
        return orderId
    @iswrapper
    def contractDetails(self, reqId: int, contractDetails: ContractDetails):
        """Callback: validate returned details and fan out data requests."""
        super(IbApp, self).contractDetails(reqId, contractDetails)
        self.Logger.info('contractDetails received %s ' % contractDetails.summary)
        if reqId not in self.requestedContracts:
            self.Logger.warning('Unknown contractDetails reqId: %s' % reqId)
            return
        contract = self.requestedContracts[reqId]
        if contract.symbol == contractDetails.summary.symbol or contract.symbol == contractDetails.marketName:
            validated = Contract()
            validated.symbol = contractDetails.summary.symbol
            validated.secType = contractDetails.summary.secType
            validated.exchange = contractDetails.summary.exchange
            validated.tradingClass = contractDetails.summary.tradingClass
            validated.lastTradeDateOrContractMonth = contractDetails.summary.lastTradeDateOrContractMonth
            validated.localSymbol = contractDetails.summary.localSymbol
            if self.__subRealMD:
                cId = self.nextReqId()
                self.marketDataLookup[cId] = validated.localSymbol
                self.requestedMarketData[cId] = validated
                # NOTE(review): this passes the original ``contract`` while
                # the historical branch below passes ``validated`` — likely
                # meant to be ``validated`` here too; confirm before changing.
                self.reqMktData(cId, contract, "", True, False, [])
            if self.__subHistMD:
                hId = self.nextReqId()
                self.historicalLookup[hId] = validated.localSymbol
                self.requestedHistoricalData[hId] = validated
                self.reqHistoricalData(hId, validated, '', "2 D", "1 day", "TRADES", 1, 1, False, list("XYZ"))
        else:
            self.Logger.warning('Unknown contract received %s' % contractDetails.summary)
    @iswrapper
    def contractDetailsEnd(self, reqId: int):
        super(IbApp, self).contractDetailsEnd(reqId)
        self.Logger.info("ContractDetailsEnd. %s" % reqId)
        # Request answered: stop re-requesting it from verify().
        if reqId in self.requestedContracts:
            del self.requestedContracts[reqId]
    @iswrapper
    def historicalData(self, reqId: TickerId, bar: BarData):
        """Callback: persist each received daily bar to DynamoDB."""
        sym = self.historicalLookup[reqId]
        self.Logger.info("HistoricalData. " + sym + " Date: " + bar.date + " Open: " + str(bar.open) +
                         " High: " + str(bar.high) + " Low: " + str(bar.low) + " Close: " + str(bar.close) + " Volume: "
                         + str(bar.volume) + " Count: " + str(bar.barCount))
        # First bar marks the request as answered; later bars still update.
        if reqId in self.requestedHistoricalData:
            del self.requestedHistoricalData[reqId]
        self.UpdateQuote(sym, bar.date, bar.open, bar.close, bar.high, bar.low, bar.volume, bar.barCount)
    @iswrapper
    def historicalDataEnd(self, reqId: int, start: str, end: str):
        super(IbApp, self).historicalDataEnd(reqId, start, end)
        self.Logger.info("HistoricalDataEnd " + str(reqId) + " from " + start + " to " + end)
    @iswrapper
    def tickSnapshotEnd(self, reqId: int):
        super(IbApp, self).tickSnapshotEnd(reqId)
        self.Logger.info("TickSnapshotEnd: %s" % reqId)
    @iswrapper
    def nextValidId(self, orderId: int):
        """Callback: first usable id from IB; seeds counters and kicks off
        the subscription workflow."""
        super(IbApp, self).nextValidId(orderId)
        self.Logger.info("setting nextValidOrderId: %d" % orderId)
        self.nextValidOrderId = orderId
        self.nextValidReqId = orderId
        self.start()
    @iswrapper
    def marketDataType(self, reqId: TickerId, marketDataType: int):
        super(IbApp, self).marketDataType(reqId, marketDataType)
        self.Logger.info("MarketDataType. %s Type: %s" % (reqId, marketDataType))
    @iswrapper
    def error(self, *args):
        super(IbApp, self).error(*args)
    @iswrapper
    def winError(self, *args):
        # NOTE(review): delegates to error(), mirroring the handler above.
        super(IbApp, self).error(*args)
    @iswrapper
    def currentTime(self, tim: int):
        super(IbApp, self).currentTime(tim)
        self.Logger.info('currentTime: %s' % tim)
    @iswrapper
    def tickPrice(self, tickerId: TickerId, tickType: TickType, price: float, attrib):
        symbol = self.marketDataLookup[tickerId]
        self.Logger.info('%s %s %s %s IB' % (datetime.datetime.now(), symbol, TickTypeEnum.to_str(tickType), price))
        # First tick marks the snapshot request as answered.
        if tickerId in self.requestedMarketData:
            del self.requestedMarketData[tickerId]
    @iswrapper
    def tickSize(self, tickerId: TickerId, tickType: TickType, size: int):
        symbol = self.marketDataLookup[tickerId]
        self.Logger.info('%s %s %s %s IB' % (datetime.datetime.now(), symbol, TickTypeEnum.to_str(tickType), size))
def main():
    """Parse CLI options, connect to IB TWS/Gateway and run the loop."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', help='IB host', required=True)
    parser.add_argument('--port', help='IB port', type=int, required=True)
    parser.add_argument('--clientId', help='IB client id', type=int, required=True)
    # NOTE(review): distutils is deprecated (PEP 632, removed in 3.12);
    # strtobool parsing of --real/--hist will need replacing.
    parser.add_argument('--real', help='IB Market Data', type=lambda x: bool(distutils.util.strtobool(x)),
                        required=True)
    parser.add_argument('--hist', help='IB Historical', type=lambda x: bool(distutils.util.strtobool(x)), required=True)
    args = parser.parse_args()
    app = IbApp(args.real, args.hist)
    app.connect(args.host, args.port, args.clientId)
    app.Logger.info("serverVersion:%s connectionTime:%s" % (app.serverVersion(), app.twsConnectionTime()))
    app.loop()
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "2d474a24224abae56272edbba9e8a675",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 120,
"avg_line_length": 40.46010638297872,
"alnum_prop": 0.5768093078288306,
"repo_name": "th3sys/capsule",
"id": "d86c6311bfcc91c859d07dc9a2ae2f41c915cde0",
"size": "15213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibmarketdata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "340486"
},
{
"name": "Shell",
"bytes": "2426"
}
],
"symlink_target": ""
} |
import dataclasses
from typing import Optional
from mobile_seg.const import EXP_DIR
from mylib.params import ParamsMixIn
@dataclasses.dataclass(frozen=True)
class TrainerParams(ParamsMixIn):
    """Trainer-level configuration (device selection, epochs, checkpointing)."""
    num_tpu_cores: Optional[int] = None
    gpus: Optional[int] = None
    epochs: int = 100
    resume_from_checkpoint: Optional[str] = None
    save_dir: str = str(EXP_DIR)  # experiment output root
    distributed_backend: Optional[str] = None
    num_nodes: int = 1
    accumulate_grad_batches: int = 1
    weights_save_path: Optional[str] = None
    precision: int = 32
@dataclasses.dataclass(frozen=True)
class ModuleParams(ParamsMixIn):
    """Optimization and regularization settings for the training module."""
    lr: float = 3e-4
    weight_decay: float = 1e-4
    optim: str = 'radam'
    ema_decay: Optional[float] = None  # None disables EMA (see use_ema)
    ema_eval_freq: int = 1
    drop_rate: float = 0.
    drop_path_rate: float = 0.
    @property
    def use_ema(self) -> bool:
        # EMA of weights is enabled iff a decay factor is configured.
        return self.ema_decay is not None
@dataclasses.dataclass(frozen=True)
class DataParams(ParamsMixIn):
    """Dataset / split configuration."""
    batch_size: int = 32
    fold: int = 0  # -1 for cross validation
    n_splits: Optional[int] = 5
    img_size: int = 224
    seed: int = 0
    @property
    def do_cv(self) -> bool:
        # Sentinel fold == -1 selects full cross-validation.
        return self.fold == -1
@dataclasses.dataclass(frozen=True)
class Params(ParamsMixIn):
    """Top-level experiment configuration: module + trainer + data sections,
    with short aliases ``m`` / ``t`` / ``d``."""
    module_params: ModuleParams
    trainer_params: TrainerParams
    data_params: DataParams
    note: str = ''
    @property
    def m(self) -> ModuleParams:
        return self.module_params
    @property
    def t(self) -> TrainerParams:
        return self.trainer_params
    @property
    def d(self) -> DataParams:
        return self.data_params
    @property
    def do_cv(self) -> bool:
        return self.d.do_cv
    def copy_for_cv(self):
        """Return one Params copy per fold (0..n_splits-1) for cross
        validation, differing only in ``data_params.fold``.

        Bug fix: the fold copies must be seeded from ``data_params``;
        the previous code spread ``conf_orig.module_params`` into the
        'data_params' section, dropping the real data config and
        injecting optimizer keys into it.
        """
        conf_orig = self.dict_config()
        return [
            Params.from_dict({
                **conf_orig,
                'data_params': {
                    **conf_orig.data_params,
                    'fold': n,
                },
            })
            for n in range(self.d.n_splits)
        ]
# %%
if __name__ == '__main__':
    # %%
    # Smoke-test: load a params file and print the per-fold CV copies.
    p = Params.load('params/001.yaml')
    print(p)
    # %%
    for cp in p.copy_for_cv():
        print(cp.pretty())
| {
"content_hash": "18fe648e771d2ac5ad194fab2333e44b",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 48,
"avg_line_length": 21.94,
"alnum_prop": 0.5902461257976299,
"repo_name": "akirasosa/mobile-semantic-segmentation",
"id": "1120a9243acb024f5264802a3ff8134fd3796f0d",
"size": "2194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mobile_seg/params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "385"
},
{
"name": "Python",
"bytes": "86214"
}
],
"symlink_target": ""
} |
from frigg.builds.filters import BuildPermissionFilter
from frigg.builds.models import Build
from frigg.utils.tests import FiltersTestCase
class BuildsFilterTests(FiltersTestCase):
    """Checks BuildPermissionFilter object visibility against fixtures."""
    fixtures = FiltersTestCase.fixtures + ['frigg/builds/fixtures/test_permitted_objects.yaml']
    def test_build_permitted_objects(self):
        # NOTE(review): the helper's semantics for the three counts (5, 4, 2)
        # are not visible here — presumably visible-object counts for three
        # permission levels; confirm against frigg.utils.tests.FiltersTestCase.
        self.filter_test_helper(BuildPermissionFilter(), Build, 5, 4, 2)
| {
"content_hash": "3786917842f9c80f01c11fc14d42bbf7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 95,
"avg_line_length": 39.7,
"alnum_prop": 0.7884130982367759,
"repo_name": "frigg/frigg-hq",
"id": "a8422b96217c0b9d03e9b93025a5ab4372e6a4f4",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/builds/test_filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3504"
},
{
"name": "HTML",
"bytes": "8114"
},
{
"name": "JavaScript",
"bytes": "5982"
},
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "182545"
}
],
"symlink_target": ""
} |
from sqlalchemy.orm import sessionmaker
from models import News, Jobs, db_connect
NEWS_SPIDERS = ["mediationworld", "mediatedotcom", "sciencedaily", "undotorg"]
JOBS_SPIDERS = ["findaphd", "dmejobs", "unjobs", "unpeacejobs"]
class NewsPipeline(object):
    """Scrapy item pipeline persisting news items scraped by NEWS_SPIDERS.

    NOTE(review): nearly identical to JobsPipeline below — the two could be
    collapsed into one parameterized base class.
    """
    def __init__(self):
        """
        Initializes database connection and sessionmaker.
        """
        engine = db_connect()
        self.Session = sessionmaker(bind=engine)
    def process_item(self, item, spider):
        # Python 2 print statement; items from other spiders pass through.
        print "Processing News Pipeline"
        if spider.name in NEWS_SPIDERS:
            """Save news items to the database.
            This method is called for every item pipeline component.
            """
            session = self.Session()
            news = News(**item)
            try:
                # De-duplicate by title. NOTE(review): check-then-insert is
                # racy under concurrent pipelines; a unique constraint on
                # title would be safer.
                if session.query(News).filter(News.title == news.title).count() == 0:
                    session.add(news)
                    session.commit()
            except:
                # Roll back on any failure, then re-raise for Scrapy.
                session.rollback()
                raise
            finally:
                session.close()
        else:
            pass
        return item
class JobsPipeline(object):
    """Scrapy item pipeline persisting job items scraped by JOBS_SPIDERS.

    NOTE(review): mirrors NewsPipeline above; consider a shared base class.
    """
    def __init__(self):
        """
        Initializes database connection and sessionmaker.
        """
        engine = db_connect()
        self.Session = sessionmaker(bind=engine)
    def process_item(self, item, spider):
        # Python 2 print statement; items from other spiders pass through.
        print "Processing Jobs Pipeline"
        if spider.name in JOBS_SPIDERS:
            """Save jobs items to the database.
            This method is called for every item pipeline component.
            """
            session = self.Session()
            jobs = Jobs(**item)
            try:
                # De-duplicate by title (same caveat as NewsPipeline: the
                # check-then-insert pattern is racy under concurrency).
                if session.query(Jobs).filter(Jobs.title == jobs.title).count() == 0:
                    session.add(jobs)
                    session.commit()
            except:
                # Roll back on any failure, then re-raise for Scrapy.
                session.rollback()
                raise
            finally:
                session.close()
        else:
            pass
        return item
| {
"content_hash": "86bd354ebf9515472bcbe8f6df5faa40",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 85,
"avg_line_length": 28.51388888888889,
"alnum_prop": 0.5207014125669751,
"repo_name": "sebastianlees/Conflict-Gateway",
"id": "f0939d804cc1d71ede026ad9ece4c6f6d585f173",
"size": "2246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conflictgateway/scraper/scraper/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5674"
},
{
"name": "HTML",
"bytes": "60611"
},
{
"name": "JavaScript",
"bytes": "4690"
},
{
"name": "Python",
"bytes": "51798"
},
{
"name": "Shell",
"bytes": "365"
}
],
"symlink_target": ""
} |
import datetime
from rqalpha.interface import AbstractEventSource
from rqalpha.events import Event, EVENT
from rqalpha.utils import get_account_type
from rqalpha.utils.exception import CustomException, CustomError, patch_user_exc
from rqalpha.utils.datetime_func import convert_int_to_datetime
from rqalpha.const import ACCOUNT_TYPE
from rqalpha.utils.i18n import gettext as _
ONE_MINUTE = datetime.timedelta(minutes=1)
class SimulationEventSource(AbstractEventSource):
    def __init__(self, env, account_list):
        # Environment handle, the account types in play, and a flag that is
        # raised whenever the subscription universe changes (via event bus).
        self._env = env
        self._account_list = account_list
        self._universe_changed = False
        self._env.event_bus.add_listener(EVENT.POST_UNIVERSE_CHANGED, self._on_universe_changed)
    def _on_universe_changed(self, event):
        # Event-bus callback: remember that the universe has changed.
        self._universe_changed = True
def _get_universe(self):
universe = self._env.get_universe()
if len(universe) == 0 and ACCOUNT_TYPE.STOCK not in self._account_list:
error = CustomError()
error.set_msg("Current universe is empty. Please use subscribe function before trade")
raise patch_user_exc(CustomException(error))
return universe
# [BEGIN] minute event helper
@staticmethod
def _get_stock_trading_minutes(trading_date):
trading_minutes = set()
current_dt = datetime.datetime.combine(trading_date, datetime.time(9, 31))
am_end_dt = current_dt.replace(hour=11, minute=30)
pm_start_dt = current_dt.replace(hour=13, minute=1)
pm_end_dt = current_dt.replace(hour=15, minute=0)
delta_minute = datetime.timedelta(minutes=1)
while current_dt <= am_end_dt:
trading_minutes.add(current_dt)
current_dt += delta_minute
current_dt = pm_start_dt
while current_dt <= pm_end_dt:
trading_minutes.add(current_dt)
current_dt += delta_minute
return trading_minutes
def _get_future_trading_minutes(self, trading_date):
trading_minutes = set()
universe = self._get_universe()
for order_book_id in universe:
if get_account_type(order_book_id) == ACCOUNT_TYPE.STOCK:
continue
trading_minutes.update(self._env.data_proxy.get_trading_minutes_for(order_book_id, trading_date))
return set([convert_int_to_datetime(minute) for minute in trading_minutes])
def _get_trading_minutes(self, trading_date):
trading_minutes = set()
for account_type in self._account_list:
if account_type == ACCOUNT_TYPE.STOCK:
trading_minutes = trading_minutes.union(self._get_stock_trading_minutes(trading_date))
elif account_type == ACCOUNT_TYPE.FUTURE:
trading_minutes = trading_minutes.union(self._get_future_trading_minutes(trading_date))
return sorted(list(trading_minutes))
# [END] minute event helper
def events(self, start_date, end_date, frequency):
if frequency == "1d":
# 根据起始日期和结束日期,获取所有的交易日,然后再循环获取每一个交易日
for day in self._env.data_proxy.get_trading_dates(start_date, end_date):
date = day.to_pydatetime()
dt_before_trading = date.replace(hour=0, minute=0)
dt_bar = date.replace(hour=15, minute=0)
dt_after_trading = date.replace(hour=15, minute=30)
dt_settlement = date.replace(hour=17, minute=0)
yield Event(EVENT.BEFORE_TRADING, calendar_dt=dt_before_trading, trading_dt=dt_before_trading)
yield Event(EVENT.BAR, calendar_dt=dt_bar, trading_dt=dt_bar)
yield Event(EVENT.AFTER_TRADING, calendar_dt=dt_after_trading, trading_dt=dt_after_trading)
yield Event(EVENT.SETTLEMENT, calendar_dt=dt_settlement, trading_dt=dt_settlement)
elif frequency == '1m':
for day in self._env.data_proxy.get_trading_dates(start_date, end_date):
before_trading_flag = True
date = day.to_pydatetime()
last_dt = None
done = False
dt_before_day_trading = date.replace(hour=8, minute=30)
while True:
if done:
break
exit_loop = True
trading_minutes = self._get_trading_minutes(date)
for calendar_dt in trading_minutes:
if last_dt is not None and calendar_dt < last_dt:
continue
if calendar_dt < dt_before_day_trading:
trading_dt = calendar_dt.replace(year=date.year,
month=date.month,
day=date.day)
else:
trading_dt = calendar_dt
if before_trading_flag:
before_trading_flag = False
before_trading_dt = trading_dt - datetime.timedelta(minutes=30)
yield Event(EVENT.BEFORE_TRADING, calendar_dt=before_trading_dt,
trading_dt=before_trading_dt)
if self._universe_changed:
self._universe_changed = False
last_dt = calendar_dt
exit_loop = False
break
# yield handle bar
yield Event(EVENT.BAR, calendar_dt=calendar_dt, trading_dt=trading_dt)
if exit_loop:
done = True
dt = date.replace(hour=15, minute=30)
yield Event(EVENT.AFTER_TRADING, calendar_dt=dt, trading_dt=dt)
dt = date.replace(hour=17, minute=0)
yield Event(EVENT.SETTLEMENT, calendar_dt=dt, trading_dt=dt)
elif frequency == "tick":
data_proxy = self._env.data_proxy
for day in data_proxy.get_trading_dates(start_date, end_date):
date = day.to_pydatetime()
last_tick = None
last_dt = None
while True:
for tick in data_proxy.get_merge_ticks(self._get_universe(), date, last_dt):
# find before trading time
if last_tick is None:
last_tick = tick
dt = tick.datetime
before_trading_dt = dt - datetime.timedelta(minutes=30)
yield Event(EVENT.BEFORE_TRADING, calendar_dt=before_trading_dt,
trading_dt=before_trading_dt)
dt = tick.datetime
yield Event(EVENT.TICK, calendar_dt=dt, trading_dt=dt, tick=tick)
if self._universe_changed:
self._universe_changed = False
last_dt = dt
break
else:
break
dt = date.replace(hour=15, minute=30)
yield Event(EVENT.AFTER_TRADING, calendar_dt=dt, trading_dt=dt)
dt = date.replace(hour=17, minute=0)
yield Event(EVENT.SETTLEMENT, calendar_dt=dt, trading_dt=dt)
else:
raise NotImplementedError(_("Frequency {} is not support.").format(frequency))
| {
"content_hash": "3766bf19fbbbdb0b3d412301b4857cf9",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 110,
"avg_line_length": 46.838509316770185,
"alnum_prop": 0.5488661981169606,
"repo_name": "zhengwsh/InplusTrader_Linux",
"id": "d0c72195ce64e70f08449f56391051b7b7bcdc3c",
"size": "8214",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rqalpha/mod/rqalpha_mod_sys_inplustrader/simulation_event_source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1727083"
},
{
"name": "C++",
"bytes": "3367509"
},
{
"name": "CMake",
"bytes": "3288"
},
{
"name": "Jupyter Notebook",
"bytes": "10948"
},
{
"name": "Objective-C",
"bytes": "1612"
},
{
"name": "Python",
"bytes": "3819836"
},
{
"name": "Shell",
"bytes": "397"
}
],
"symlink_target": ""
} |
"""
sql module provides database-relevant functions
Required Library
- sys
- time
- MySQLdb
- pymssql
TODO:
-
"""
import sys
import time
import datetime
from sre_compile import isstring
reload(sys)
sys.setdefaultencoding("utf-8")
class DB:
    """Thin convenience wrapper around a MySQL or MSSQL connection.

    ``config`` keys read here: host, db, port, type ("MySQL"/"MSSQL"),
    user, passwd, and optionally quiet (suppress connect/close messages).
    """
    def __init__(self, config):
        self.config = config
        self.conn = None
        self.cursor = None
        self.getConn()
        self.query_result = []
    def set(self, config):
        # Swap in a new configuration and clear any cached query result.
        self.config = config
        self.query_result = []
    def getConn(self):
        """Connect with retries; returns the connection (None on failure)."""
        host = self.config["host"]
        db = self.config["db"]
        port = self.config["port"]
        type = self.config["type"]
        user = self.config["user"]
        passwd = self.config["passwd"]
        quiet = self.config.get("quiet", False)
        failure = 1
        while 1:
            try:
                if failure >= 3:
                    # give up after repeated failures; self.conn stays None
                    print "Failure connect to %s(%s)" % (host, db)
                    break
                else:
                    if type == "MySQL":
                        try:
                            import MySQLdb
                            self.conn = MySQLdb.connect(
                                host=host, db=db, port=port, user=user, passwd=passwd, use_unicode=False, charset="utf8")
                            self.cursor = self.conn.cursor()
                            self.cursor.execute("set names utf8")
                            if quiet:
                                pass
                            else:
                                print "Connect to %s(MySQL: %s) successfully!" % (host, db)
                            break
                        except Exception, e:
                            print "ERROR: cannot connect to %s(MySQL: %s)! %s" % (host, db, e.message)
                            failure += 1
                            time.sleep(3)
                    if type == "MSSQL":
                        try:
                            import pymssql
                            # NOTE(review): the MSSQL port is hard-coded to
                            # 1433; the configured "port" value is ignored here
                            self.conn = pymssql.connect(
                                host=host, database=db, port=1433, user=user, password=passwd, charset="utf8")
                            self.cursor = self.conn.cursor()
                            if quiet :
                                pass
                            else:
                                print "Connect to %s(MSSQL: %s) successfully!" % (host, db)
                            break
                        except Exception, e:
                            print "ERROR: cannot connect to %s(MSSQL: %s)! %s" % (host, db, e.message)
                            failure += 1
                            time.sleep(3)
            except Exception, e:
                print e.message + " and will restart to connect to %s(%s) in 3secs" % (host, db)
                time.sleep(3)
                failure += 1
        return self.conn
    def getCursor(self):
        # Refresh the cursor, reconnecting first if the connection is gone.
        if self.conn and self.cursor:
            self.cursor = self.conn.cursor()
        else:
            self.cursor = self.getConn().cursor()
        return self.cursor
    def test(self):
        # Connectivity smoke test: print the server version.
        self.cursor.execute("SELECT VERSION()")
        data = self.cursor.fetchone()
        print data
    def where(self, data):
        """Build a " where ..." clause from a string or a {column: value} dict."""
        comm = " where "
        if isstring(data):
            return comm + data
        for key in data:
            if str(data[key]).isdigit():
                comm += "%s=%s and " % (key, data[key])
            else:
                comm += "%s='%s' and " % (key, data[key])
        # drop the dangling "and " appended by the last iteration
        return comm[0:-4]
    def insert(self, query, params):
        """executemany *query* with *params*, printing (not raising) errors."""
        # param = ()
        # length = len(data)
        # count = 0
        # for ii in xrange(0, length):
        #     param += (tuple(data[ii].split()),)
        #     if (ii + 1) % 1000 == 0 or (ii + 1) == length:
        #         count += self.cursor.executemany(query, param)
        #         self.conn.commit()
        # return count
        try:
            self.cursor.executemany(query, params)
        except Exception, e:
            print "Execute %s Error: %s " % (query, e)
        finally:
            self.conn.commit()
    def insertDict(self, data, table):
        """Upsert one row: INSERT ... ON DUPLICATE KEY UPDATE built from *data*."""
        # build the insert statement
        keys = []
        values = []
        conditions = []
        params = []
        for key in data:
            if isinstance(key, int):
                continue
            if isinstance(data[key], datetime.date):
                # Python 2 strftime rejects years < 1900, so clamp such
                # dates to a sentinel value -- presumably intentional
                if data[key].year < 1900:
                    data[key] = "1900-01-01 00:00:00"
                else:
                    data[key] = data[key].strftime("%Y-%m-%d %H:%M:%S")
            elif isinstance(data[key], long) or isinstance(data[key], int) or isinstance(data[key], float):
                data[key] = str(data[key])
            elif isinstance(data[key], unicode):
                data[key] = data[key].encode("utf-8", "ignore")
            keys.append(key)
            values.append("%s")
            conditions.append("%s=%s" % (key, "%s"))
            params.append(data[key])
        # double the parameter list: the same values feed both the VALUES()
        # part and the ON DUPLICATE KEY UPDATE part of the statement
        tmp = params
        params.extend(tmp)
        keys = ",".join(keys)
        values = ",".join(values)
        conditions = ",".join(conditions)
        comm = "insert into %s(%s) values(%s) on duplicate key update %s" % (table, keys, values, conditions)
        # try to insert the row
        try:
            # NOTE(review): params is a list, so params == "" is never true;
            # the first branch looks dead -- confirm before relying on it
            if params == "":
                return self.cursor.execute(comm)
            else:
                return self.cursor.execute(comm, params)
        except Exception, e:
            print e
            return 0
        finally:
            self.conn.commit()
    def update(self, query, params):
        """executemany *query* with *params*, printing (not raising) errors."""
        try:
            self.cursor.executemany(query, params)
        except Exception, e:
            print "Execute %s Error: %s " % (query, e)
        finally:
            self.conn.commit()
    def delete(self, query):
        # not implemented
        pass
    def select(self, query):
        """Run *query*, cache all rows in self.query_result, return (rows, count)."""
        self.query_result = []
        self.cursor.execute(query)
        while 1:
            # fetch in batches of 1000 to bound per-round-trip memory
            result = self.cursor.fetchmany(1000)
            if len(result) == 0:
                break
            for item in result:
                self.query_result.append(list(item))
        return self.query_result, len(self.query_result)
    def flush(self, file, split="\t"):
        """Write the cached query result to *file*, one row per line."""
        try:
            with open(file, "w") as fp:
                length = len(self.query_result)
                for ii in xrange(0, length):
                    out_str = ""
                    item = self.query_result[ii]
                    for jj in xrange(0, len(item)):
                        out_str += str(item[jj]) + split
                    out_str = out_str[:-1]
                    fp.write(out_str.strip() + "\n")
        except Exception, e:
            print e.message
    def close(self):
        """Close cursor and connection, optionally printing a message."""
        self.cursor.close()
        self.conn.close()
        # NOTE(review): default is the *string* "False" and the check is
        # "== True", so the message prints unless quiet is literally True
        quiet = self.config.get("quiet", "False")
        if quiet == True:
            pass
        else:
            print "Close connection to %s(%s)." % (self.config["host"], self.config["db"])
| {
"content_hash": "c53d4bb9ed69b7884b91feafafa796ad",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 121,
"avg_line_length": 31.477678571428573,
"alnum_prop": 0.450148915047511,
"repo_name": "yan9yu/lib",
"id": "4c7c40846873e7b62f09178a16ee6dfa391cc262",
"size": "7114",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50999"
}
],
"symlink_target": ""
} |
import itertools
import functools
import operator
import warnings
from distutils.version import LooseVersion
import numpy as np
from pandas import compat
from pandas._libs import tslib, algos, lib
from pandas.core.dtypes.common import (
_get_dtype,
is_float, is_scalar,
is_integer, is_complex, is_float_dtype,
is_complex_dtype, is_integer_dtype,
is_bool_dtype, is_object_dtype,
is_numeric_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
is_datetime_or_timedelta_dtype,
is_int_or_datetime_dtype, is_any_int_dtype)
from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask
from pandas.core.dtypes.missing import isna, notna
from pandas.core.config import get_option
from pandas.core.common import _values_from_object
# Optional bottleneck acceleration: detected at import time, toggled via
# the ``compute.use_bottleneck`` option.
_BOTTLENECK_INSTALLED = False
_MIN_BOTTLENECK_VERSION = '1.0.0'
try:
    import bottleneck as bn
    ver = bn.__version__
    _BOTTLENECK_INSTALLED = (LooseVersion(ver) >=
                             LooseVersion(_MIN_BOTTLENECK_VERSION))
    if not _BOTTLENECK_INSTALLED:
        warnings.warn(
            "The installed version of bottleneck {ver} is not supported "
            "in pandas and will not be used\nThe minimum supported "
            "version is {min_ver}\n".format(
                ver=ver, min_ver=_MIN_BOTTLENECK_VERSION), UserWarning)
except ImportError:  # pragma: no cover
    pass
_USE_BOTTLENECK = False
def set_use_bottleneck(v=True):
    """Globally enable/disable bottleneck (no-op when it is not installed)."""
    global _USE_BOTTLENECK
    if _BOTTLENECK_INSTALLED:
        _USE_BOTTLENECK = v
set_use_bottleneck(get_option('compute.use_bottleneck'))
class disallow(object):
    """Decorator that raises TypeError when any argument of the wrapped
    reduction has one of the disallowed dtypes (e.g. ``@disallow('M8')``)."""
    def __init__(self, *dtypes):
        super(disallow, self).__init__()
        # normalize dtype strings to numpy scalar types for issubclass checks
        self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
    def check(self, obj):
        # True when *obj* is array-like with a disallowed dtype
        return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
                                                    self.dtypes)
    def __call__(self, f):
        @functools.wraps(f)
        def _f(*args, **kwargs):
            # scan positional and keyword arguments for banned dtypes
            obj_iter = itertools.chain(args, compat.itervalues(kwargs))
            if any(self.check(obj) for obj in obj_iter):
                msg = 'reduction operation {name!r} not allowed for this dtype'
                raise TypeError(msg.format(name=f.__name__.replace('nan', '')))
            try:
                with np.errstate(invalid='ignore'):
                    return f(*args, **kwargs)
            except ValueError as e:
                # we want to transform an object array
                # ValueError message to the more typical TypeError
                # e.g. this is normally a disallowed function on
                # object arrays that contain strings
                if is_object_dtype(args[0]):
                    raise TypeError(e)
                raise
        return _f
class bottleneck_switch(object):
    """Decorator that dispatches to the bottleneck implementation of the
    wrapped reduction when available/enabled, falling back to the pure
    pandas implementation (``alt``) otherwise or on any error."""
    def __init__(self, zero_value=None, **kwargs):
        # zero_value: result for empty input; kwargs: default kwds for alt
        self.zero_value = zero_value
        self.kwargs = kwargs
    def __call__(self, alt):
        bn_name = alt.__name__
        try:
            # look up the bottleneck function with the same name (e.g. nansum)
            bn_func = getattr(bn, bn_name)
        except (AttributeError, NameError):  # pragma: no cover
            bn_func = None
        @functools.wraps(alt)
        def f(values, axis=None, skipna=True, **kwds):
            # fill in default keyword arguments without clobbering caller's
            if len(self.kwargs) > 0:
                for k, v in compat.iteritems(self.kwargs):
                    if k not in kwds:
                        kwds[k] = v
            try:
                # short-circuit for empty input when a zero_value is defined
                if self.zero_value is not None and values.size == 0:
                    if values.ndim == 1:
                        # wrap the 0's if needed
                        if is_timedelta64_dtype(values):
                            return lib.Timedelta(0)
                        return 0
                    else:
                        result_shape = (values.shape[:axis] +
                                        values.shape[axis + 1:])
                        result = np.empty(result_shape)
                        result.fill(0)
                        return result
                if (_USE_BOTTLENECK and skipna and
                        _bn_ok_dtype(values.dtype, bn_name)):
                    result = bn_func(values, axis=axis, **kwds)
                    # prefer to treat inf/-inf as NA, but must compute the func
                    # twice :(
                    if _has_infs(result):
                        result = alt(values, axis=axis, skipna=skipna, **kwds)
                else:
                    result = alt(values, axis=axis, skipna=skipna, **kwds)
            except Exception:
                # bottleneck (or the fast path) failed: retry with pandas impl
                try:
                    result = alt(values, axis=axis, skipna=skipna, **kwds)
                except ValueError as e:
                    # we want to transform an object array
                    # ValueError message to the more typical TypeError
                    # e.g. this is normally a disallowed function on
                    # object arrays that contain strings
                    if is_object_dtype(values):
                        raise TypeError(e)
                    raise
            return result
        return f
def _bn_ok_dtype(dt, name):
    """Return True when bottleneck can safely handle dtype *dt* for the
    reduction called *name*."""
    # Bottleneck chokes on object and datetime64/timedelta64 dtypes.
    if is_object_dtype(dt) or is_datetime_or_timedelta_dtype(dt):
        return False
    # bottleneck's nansum does not upcast during accumulation, so sums of
    # narrow (< 8 byte) dtypes can overflow -- avoid it there.
    if name == 'nansum' and dt.itemsize < 8:
        return False
    return True
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
return lib.has_infs_f8(result.ravel())
elif result.dtype == 'f4':
return lib.has_infs_f4(result.ravel())
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError):
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
    """Return the fill value appropriate for *dtype*.

    An explicit *fill_value* wins; otherwise NaN-capable dtypes get
    NaN/+inf/-inf and NaN-incapable (int/datetime) dtypes get iNaT or the
    max int64, depending on *fill_value_typ* (None, '+inf' or '-inf').
    """
    if fill_value is not None:
        return fill_value
    na_ok = _na_ok_dtype(dtype)
    if fill_value_typ is None:
        return np.nan if na_ok else tslib.iNaT
    if fill_value_typ == '+inf':
        # NaN-incapable dtypes need the max int as a stand-in for +inf
        return np.inf if na_ok else _int64_max
    return -np.inf if na_ok else tslib.iNaT
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
                isfinite=False, copy=True):
    """ utility to get the values view, mask, dtype
    if necessary copy and mask using the specified fill_value
    copy = True will force the copy

    Returns a 4-tuple (values, mask, dtype, dtype_max) where mask marks the
    NA (or non-finite, when isfinite=True) positions of the original input.
    """
    values = _values_from_object(values)
    if isfinite:
        mask = _isfinite(values)
    else:
        mask = isna(values)
    dtype = values.dtype
    dtype_ok = _na_ok_dtype(dtype)
    # get our fill value (in case we need to provide an alternative
    # dtype for it)
    fill_value = _get_fill_value(dtype, fill_value=fill_value,
                                 fill_value_typ=fill_value_typ)
    if skipna:
        if copy:
            values = values.copy()
        if dtype_ok:
            np.putmask(values, mask, fill_value)
        # promote if needed
        else:
            values, changed = maybe_upcast_putmask(values, mask, fill_value)
    elif copy:
        values = values.copy()
    # datetime-likes are viewed as int64 so numpy reductions work on them
    values = _view_if_needed(values)
    # return a platform independent precision dtype
    dtype_max = dtype
    if is_integer_dtype(dtype) or is_bool_dtype(dtype):
        dtype_max = np.int64
    elif is_float_dtype(dtype):
        dtype_max = np.float64
    return values, mask, dtype, dtype_max
def _isfinite(values):
    """Boolean mask of entries that are NA or infinite."""
    # datetime-likes can only be "non-finite" via NaT
    if is_datetime_or_timedelta_dtype(values):
        return isna(values)
    numeric = (is_complex_dtype(values) or is_float_dtype(values) or
               is_integer_dtype(values) or is_bool_dtype(values))
    if not numeric:
        # object/other arrays: coerce to float64 before testing
        return ~np.isfinite(values.astype('float64'))
    return ~np.isfinite(values)
def _na_ok_dtype(dtype):
    """True when *dtype* can natively represent NaN (ints/datetimes cannot)."""
    na_incompatible = is_int_or_datetime_dtype(dtype)
    return not na_incompatible
def _view_if_needed(values):
    """View datetime-like arrays as int64 so numpy reductions can run."""
    if not is_datetime_or_timedelta_dtype(values):
        return values
    return values.view(np.int64)
def _wrap_results(result, dtype):
    """Box scalar results / re-view array results for datetime-like dtypes;
    other dtypes pass through unchanged."""
    is_array = isinstance(result, np.ndarray)
    if is_datetime64_dtype(dtype):
        result = result.view(dtype) if is_array else lib.Timestamp(result)
    elif is_timedelta64_dtype(dtype):
        if is_array:
            result = result.astype('i8').view(dtype)
        else:
            # a scalar timedelta64[ns] beyond the int64 range is unrepresentable
            if np.fabs(result) > _int64_max:
                raise ValueError("overflow in timedelta operation")
            result = lib.Timedelta(result, unit='ns')
    return result
def nanany(values, axis=None, skipna=True):
    """True if any element along *axis* is truthy; NAs are filled with False."""
    arr, _mask, _dtype, _ = _get_values(values, skipna, False, copy=skipna)
    return arr.any(axis)
def nanall(values, axis=None, skipna=True):
    """True if every element along *axis* is truthy; NAs are filled with True."""
    arr, _mask, _dtype, _ = _get_values(values, skipna, True, copy=skipna)
    return arr.all(axis)
@disallow('M8')
@bottleneck_switch(zero_value=0)
def nansum(values, axis=None, skipna=True):
    """Sum along *axis* treating NAs as zero; all-NA slices become NA."""
    arr, mask, dtype, dtype_max = _get_values(values, skipna, 0)
    # accumulate in the platform-max dtype, but keep float precision as-is
    # and sum timedeltas (viewed as i8) in float64
    if is_float_dtype(dtype):
        acc_dtype = dtype
    elif is_timedelta64_dtype(dtype):
        acc_dtype = np.float64
    else:
        acc_dtype = dtype_max
    total = arr.sum(axis, dtype=acc_dtype)
    total = _maybe_null_out(total, axis, mask)
    return _wrap_results(total, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
    """Mean along *axis* skipping NAs; empty/all-NA slices give NaN."""
    values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
    dtype_sum = dtype_max
    dtype_count = np.float64
    # ints/timedeltas accumulate in float64; floats keep their own precision
    if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype):
        dtype_sum = np.float64
    elif is_float_dtype(dtype):
        dtype_sum = dtype
        dtype_count = dtype
    count = _get_counts(mask, axis, dtype=dtype_count)
    the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
    if axis is not None and getattr(the_sum, 'ndim', False):
        # array result: NaN out slices that had no valid values
        the_mean = the_sum / count
        ct_mask = count == 0
        if ct_mask.any():
            the_mean[ct_mask] = np.nan
    else:
        # scalar result
        the_mean = the_sum / count if count > 0 else np.nan
    return _wrap_results(the_mean, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
    """Median along *axis*; NAs are skipped (or propagate when skipna=False)."""
    values, mask, dtype, dtype_max = _get_values(values, skipna)
    def get_median(x):
        # median of one 1-d slice, honoring the enclosing skipna flag
        mask = notna(x)
        if not skipna and not mask.all():
            return np.nan
        return algos.median(_values_from_object(x[mask]))
    if not is_float_dtype(values):
        # coerce so NA positions can be marked with NaN
        values = values.astype('f8')
        values[mask] = np.nan
    if axis is None:
        values = values.ravel()
    notempty = values.size
    # an array from a frame
    if values.ndim > 1:
        # there's a non-empty array to apply over otherwise numpy raises
        if notempty:
            return _wrap_results(
                np.apply_along_axis(get_median, axis, values), dtype)
        # must return the correct shape, but median is not defined for the
        # empty set so return nans of shape "everything but the passed axis"
        # since "axis" is where the reduction would occur if we had a nonempty
        # array
        shp = np.array(values.shape)
        dims = np.arange(values.ndim)
        ret = np.empty(shp[dims != axis])
        ret.fill(np.nan)
        return _wrap_results(ret, dtype)
    # otherwise return a scalar value
    return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(mask, axis, ddof, dtype=float):
    """Return (count, degrees of freedom) for variance-style reductions.

    Entries where count <= ddof are set to NaN in both outputs so later
    divisions yield NaN rather than inf.
    """
    dtype = _get_dtype(dtype)
    count = _get_counts(mask, axis, dtype=dtype)
    dof = count - dtype.type(ddof)
    # always return NaN, never inf
    if is_scalar(count):
        if count <= ddof:
            return np.nan, np.nan
        return count, dof
    invalid = count <= ddof
    if invalid.any():
        np.putmask(dof, invalid, np.nan)
        np.putmask(count, invalid, np.nan)
    return count, dof
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanstd(values, axis=None, skipna=True, ddof=1):
    """Standard deviation: sqrt(nanvar), wrapped back to the input dtype."""
    variance = nanvar(values, axis=axis, skipna=skipna, ddof=ddof)
    return _wrap_results(np.sqrt(variance), values.dtype)
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
    """Variance along *axis* with *ddof* delta degrees of freedom,
    skipping NAs; slices with count <= ddof yield NaN."""
    values = _values_from_object(values)
    dtype = values.dtype
    mask = isna(values)
    if is_any_int_dtype(values):
        # coerce ints so NA positions can hold NaN
        values = values.astype('f8')
        values[mask] = np.nan
    if is_float_dtype(values):
        count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype)
    else:
        count, d = _get_counts_nanvar(mask, axis, ddof)
    if skipna:
        values = values.copy()
        np.putmask(values, mask, 0)
    # xref GH10242
    # Compute variance via two-pass algorithm, which is stable against
    # cancellation errors and relatively accurate for small numbers of
    # observations.
    #
    # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
    if axis is not None:
        avg = np.expand_dims(avg, axis)
    sqr = _ensure_numeric((avg - values)**2)
    np.putmask(sqr, mask, 0)
    result = sqr.sum(axis=axis, dtype=np.float64) / d
    # Return variance as np.float64 (the datatype used in the accumulator),
    # unless we were dealing with a float array, in which case use the same
    # precision as the original values array.
    if is_float_dtype(dtype):
        result = result.astype(dtype)
    return _wrap_results(result, values.dtype)
@disallow('M8', 'm8')
def nansem(values, axis=None, skipna=True, ddof=1):
    """Standard error of the mean along *axis*: sqrt(var) / sqrt(count).

    NAs are skipped when computing both the variance and the count;
    *ddof* is forwarded to nanvar / the count adjustment.
    """
    mask = isna(values)
    if not is_float_dtype(values.dtype):
        values = values.astype('f8')
    count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype)
    # compute the variance once, after any dtype coercion (the previous
    # version also called nanvar before coercion and discarded the result)
    var = nanvar(values, axis, skipna, ddof=ddof)
    return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
    """Factory for nanmin/nanmax: NAs are filled with +/-inf before reducing
    so they can never win the min/max."""
    @bottleneck_switch()
    def reduction(values, axis=None, skipna=True):
        values, mask, dtype, dtype_max = _get_values(
            values, skipna, fill_value_typ=fill_value_typ, )
        if ((axis is not None and values.shape[axis] == 0) or
                values.size == 0):
            # empty reduction: try a NaN-filled result of the right shape,
            # falling back to a scalar NaN
            try:
                result = getattr(values, meth)(axis, dtype=dtype_max)
                result.fill(np.nan)
            except:
                result = np.nan
        else:
            result = getattr(values, meth)(axis)
        result = _wrap_results(result, dtype)
        return _maybe_null_out(result, axis, mask)
    reduction.__name__ = 'nan' + meth
    return reduction
nanmin = _nanminmax('min', fill_value_typ='+inf')
nanmax = _nanminmax('max', fill_value_typ='-inf')
@disallow('O')
def nanargmax(values, axis=None, skipna=True):
    """Index of the maximum along *axis*; NA slices yield -1."""
    # NAs become -inf so they can never be the argmax
    arr, mask, _dtype, _ = _get_values(values, skipna, fill_value_typ='-inf')
    idx = arr.argmax(axis)
    return _maybe_arg_null_out(idx, axis, mask, skipna)
@disallow('O')
def nanargmin(values, axis=None, skipna=True):
    """Index of the minimum along *axis*; NA slices yield -1."""
    # NAs become +inf so they can never be the argmin
    arr, mask, _dtype, _ = _get_values(values, skipna, fill_value_typ='+inf')
    idx = arr.argmin(axis)
    return _maybe_arg_null_out(idx, axis, mask, skipna)
@disallow('M8', 'm8')
def nanskew(values, axis=None, skipna=True):
    """ Compute the sample skewness.
    The statistic computed here is the adjusted Fisher-Pearson standardized
    moment coefficient G1. The algorithm computes this coefficient directly
    from the second and third central moment.
    """
    values = _values_from_object(values)
    mask = isna(values)
    if not is_float_dtype(values.dtype):
        values = values.astype('f8')
        count = _get_counts(mask, axis)
    else:
        count = _get_counts(mask, axis, dtype=values.dtype)
    if skipna:
        # zero out NAs so they do not contribute to the sums
        values = values.copy()
        np.putmask(values, mask, 0)
    mean = values.sum(axis, dtype=np.float64) / count
    if axis is not None:
        mean = np.expand_dims(mean, axis)
    adjusted = values - mean
    if skipna:
        np.putmask(adjusted, mask, 0)
    adjusted2 = adjusted ** 2
    adjusted3 = adjusted2 * adjusted
    # second and third central moments
    m2 = adjusted2.sum(axis, dtype=np.float64)
    m3 = adjusted3.sum(axis, dtype=np.float64)
    # floating point error
    m2 = _zero_out_fperr(m2)
    m3 = _zero_out_fperr(m3)
    with np.errstate(invalid='ignore', divide='ignore'):
        result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)
    dtype = values.dtype
    if is_float_dtype(dtype):
        result = result.astype(dtype)
    # zero variance gives skew 0; fewer than 3 observations gives NaN
    if isinstance(result, np.ndarray):
        result = np.where(m2 == 0, 0, result)
        result[count < 3] = np.nan
        return result
    else:
        result = 0 if m2 == 0 else result
        if count < 3:
            return np.nan
        return result
@disallow('M8', 'm8')
def nankurt(values, axis=None, skipna=True):
    """ Compute the sample excess kurtosis.
    The statistic computed here is the adjusted Fisher-Pearson standardized
    moment coefficient G2, computed directly from the second and fourth
    central moment.
    """
    values = _values_from_object(values)
    mask = isna(values)
    if not is_float_dtype(values.dtype):
        values = values.astype('f8')
        count = _get_counts(mask, axis)
    else:
        count = _get_counts(mask, axis, dtype=values.dtype)
    if skipna:
        # zero out NAs so they do not contribute to the sums
        values = values.copy()
        np.putmask(values, mask, 0)
    mean = values.sum(axis, dtype=np.float64) / count
    if axis is not None:
        mean = np.expand_dims(mean, axis)
    adjusted = values - mean
    if skipna:
        np.putmask(adjusted, mask, 0)
    adjusted2 = adjusted ** 2
    adjusted4 = adjusted2 ** 2
    # second and fourth central moments
    m2 = adjusted2.sum(axis, dtype=np.float64)
    m4 = adjusted4.sum(axis, dtype=np.float64)
    with np.errstate(invalid='ignore', divide='ignore'):
        adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
        numer = count * (count + 1) * (count - 1) * m4
        denom = (count - 2) * (count - 3) * m2**2
        result = numer / denom - adj
    # floating point error
    numer = _zero_out_fperr(numer)
    denom = _zero_out_fperr(denom)
    if not isinstance(denom, np.ndarray):
        # if ``denom`` is a scalar, check these corner cases first before
        # doing division
        if count < 4:
            return np.nan
        if denom == 0:
            return 0
    # recompute with the fp-error-corrected numerator/denominator
    with np.errstate(invalid='ignore', divide='ignore'):
        result = numer / denom - adj
    dtype = values.dtype
    if is_float_dtype(dtype):
        result = result.astype(dtype)
    # zero variance gives kurtosis 0; fewer than 4 observations gives NaN
    if isinstance(result, np.ndarray):
        result = np.where(denom == 0, 0, result)
        result[count < 4] = np.nan
    return result
@disallow('M8', 'm8')
def nanprod(values, axis=None, skipna=True):
    """Product along *axis*; NAs count as 1, all-NA slices become NA."""
    mask = isna(values)
    if skipna and not is_any_int_dtype(values):
        # work on a copy so the caller's array is not mutated
        filled = values.copy()
        filled[mask] = 1
    else:
        filled = values
    return _maybe_null_out(filled.prod(axis), axis, mask)
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None or not getattr(result, 'ndim', False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(mask, axis, dtype=float):
    """Count non-masked elements along *axis* (total when axis is None),
    returned with the requested *dtype*."""
    dtype = _get_dtype(dtype)
    if axis is None:
        return dtype.type(mask.size - mask.sum())
    non_na = mask.shape[axis] - mask.sum(axis)
    if is_scalar(non_na):
        return dtype.type(non_na)
    try:
        return non_na.astype(dtype)
    except AttributeError:
        # not an ndarray (e.g. a plain Python number container)
        return np.array(non_na, dtype=dtype)
def _maybe_null_out(result, axis, mask):
    """NA-out result entries whose input slice was entirely NA."""
    if axis is not None and getattr(result, 'ndim', False):
        # array path: find slices with zero valid values
        null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
        if np.any(null_mask):
            if is_numeric_dtype(result):
                # upcast so NaN can be stored
                if np.iscomplexobj(result):
                    result = result.astype('c16')
                else:
                    result = result.astype('f8')
                result[null_mask] = np.nan
            else:
                # GH12941, use None to auto cast null
                result[null_mask] = None
    elif result is not tslib.NaT:
        # scalar path: an all-NA input collapses to NaN
        null_mask = mask.size - mask.sum()
        if null_mask == 0:
            result = np.nan
    return result
def _zero_out_fperr(arg):
if isinstance(arg, np.ndarray):
with np.errstate(invalid='ignore'):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
@disallow('M8', 'm8')
def nancorr(a, b, method='pearson', min_periods=None):
    """Correlation of two equal-length 1-d ndarrays, dropping pairwise-NA
    entries; returns NaN when fewer than *min_periods* pairs remain."""
    if len(a) != len(b):
        raise AssertionError('Operands to nancorr must have same size')
    min_periods = 1 if min_periods is None else min_periods
    pairwise_valid = notna(a) & notna(b)
    if not pairwise_valid.all():
        a, b = a[pairwise_valid], b[pairwise_valid]
    if len(a) < min_periods:
        return np.nan
    return get_corr_func(method)(a, b)
def get_corr_func(method):
    """Return a callable (a, b) -> correlation for *method*
    ('pearson', 'kendall' or 'spearman')."""
    if method in ['kendall', 'spearman']:
        # scipy is only needed for the rank-based correlations
        from scipy.stats import kendalltau, spearmanr
    def _pearson(x, y):
        return np.corrcoef(x, y)[0, 1]
    def _kendall(x, y):
        tau = kendalltau(x, y)
        return tau[0] if isinstance(tau, tuple) else tau
    def _spearman(x, y):
        return spearmanr(x, y)[0]
    return {
        'pearson': _pearson,
        'kendall': _kendall,
        'spearman': _spearman,
    }[method]
@disallow('M8', 'm8')
def nancov(a, b, min_periods=None):
    """Covariance of two equal-length 1-d ndarrays, dropping pairwise-NA
    entries; returns NaN when fewer than *min_periods* pairs remain."""
    if len(a) != len(b):
        raise AssertionError('Operands to nancov must have same size')
    min_periods = 1 if min_periods is None else min_periods
    pairwise_valid = notna(a) & notna(b)
    if not pairwise_valid.all():
        a, b = a[pairwise_valid], b[pairwise_valid]
    if len(a) < min_periods:
        return np.nan
    return np.cov(a, b)[0, 1]
def _ensure_numeric(x):
    """Coerce *x* (ndarray or scalar) to a numeric type.

    Object arrays are tried as complex128 first (dropping an all-zero
    imaginary part), falling back to float64; non-numeric scalars raise
    TypeError when neither float() nor complex() accepts them.
    """
    if isinstance(x, np.ndarray):
        if is_integer_dtype(x) or is_bool_dtype(x):
            return x.astype(np.float64)
        if is_object_dtype(x):
            try:
                converted = x.astype(np.complex128)
            except:  # noqa: E722 -- any failure falls back to float coercion
                return x.astype(np.float64)
            if not np.any(converted.imag):
                return converted.real
            return converted
        return x
    if is_float(x) or is_integer(x) or is_complex(x):
        return x
    try:
        return float(x)
    except Exception:
        try:
            return complex(x)
        except Exception:
            raise TypeError('Could not convert {value!s} to numeric'
                            .format(value=x))
def make_nancomp(op):
    """NA-friendly array comparisons: wrap *op* so positions that are NA in
    either operand come out as NaN instead of a boolean."""
    def f(x, y):
        na_positions = isna(x) | isna(y)
        with np.errstate(all='ignore'):
            result = op(x, y)
        if na_positions.any():
            # bool arrays cannot hold NaN; upcast to object first
            if is_bool_dtype(result):
                result = result.astype('O')
            np.putmask(result, na_positions, np.nan)
        return result
    return f
# NA-propagating element-wise comparison functions built from operator.
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
| {
"content_hash": "dae6ad246cf4b17988488c8f02c819f9",
"timestamp": "",
"source": "github",
"line_count": 819,
"max_line_length": 79,
"avg_line_length": 29.520146520146522,
"alnum_prop": 0.5791454688340159,
"repo_name": "Winand/pandas",
"id": "388b2ecdff445de3c81374ead3038f0c305512fe",
"size": "24177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/nanops.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4071"
},
{
"name": "C",
"bytes": "493226"
},
{
"name": "C++",
"bytes": "17353"
},
{
"name": "HTML",
"bytes": "551706"
},
{
"name": "Makefile",
"bytes": "907"
},
{
"name": "PowerShell",
"bytes": "2972"
},
{
"name": "Python",
"bytes": "12199454"
},
{
"name": "R",
"bytes": "1177"
},
{
"name": "Shell",
"bytes": "23114"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
# Attribute name under which unrecognized CLI arguments are stashed.
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
class Parent(object):
    """Base namespace: parse() guarantees the unrecognized-args list exists."""
    def __init__(self):
        pass
    def parse(self):
        # write directly into the instance dict (bypasses any __setattr__
        # override in subclasses), creating the list only if missing
        self.__dict__.setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
class Child(Parent):
    """Namespace that redirects attribute get/set/del into the _cli dict."""
    def __init__(self):
        self._cli = {}
    def parse(self):
        super(Child, self).parse()
    def __setattr__(self, name, value):
        if '_cli' in self.__dict__:
            self._cli[name] = value
        else:
            # during __init__, _cli itself must use normal attribute storage
            super(Child, self).__setattr__(name, value)
    def __getattr__(self, name):
        # only reached when normal attribute lookup fails
        if name in self._cli:
            return self._cli[name]
        raise AttributeError(
            "'_Namespace' object has no attribute '%s'" % name)
    def __delattr__(self, name):
        if name not in self._cli:
            raise AttributeError(
                "'_Namespace' object has no attribute '%s'" % name)
        del self._cli[name]
| {
"content_hash": "b075770e94ccd8b29ad37dd32e2c0f91",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 67,
"avg_line_length": 26.205128205128204,
"alnum_prop": 0.5283757338551859,
"repo_name": "ronaldbradford/os-demo",
"id": "f6b26605b99a0627b67df07c91b3be873928b122",
"size": "1022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/delattr1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15208"
}
],
"symlink_target": ""
} |
from google.analytics import admin_v1beta


async def sample_search_change_history_events():
    """Search change-history events for an account using the async admin client."""
    # Create a client
    client = admin_v1beta.AnalyticsAdminServiceAsyncClient()

    # Initialize request argument(s)
    request = admin_v1beta.SearchChangeHistoryEventsRequest(
        account="account_value",
    )

    # Make the request.
    # BUG FIX: the async client returns a coroutine that resolves to an
    # AsyncPager, so the call must be awaited before it can be iterated;
    # the original was missing the ``await``.
    page_result = await client.search_change_history_events(request=request)

    # Handle the response
    async for response in page_result:
        print(response)
# [END analyticsadmin_v1beta_generated_AnalyticsAdminService_SearchChangeHistoryEvents_async]
| {
"content_hash": "1ea8f568e71fd236e517236c348fd5fc",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 93,
"avg_line_length": 29.7,
"alnum_prop": 0.7441077441077442,
"repo_name": "googleapis/python-analytics-admin",
"id": "af023f8e951b3ee98766f41ee5a3340f71dbb81a",
"size": "2025",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/analyticsadmin_v1beta_generated_analytics_admin_service_search_change_history_events_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "5576405"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
} |
import unittest
import uuid
import re
import string
import random
from urllib3 import HTTPResponse
from datetime import datetime
from tests.compat import mock
try:
from kubernetes.client.rest import ApiException
from airflow import configuration
from airflow.configuration import conf
from airflow.executors.kubernetes_executor import AirflowKubernetesScheduler
from airflow.executors.kubernetes_executor import KubernetesExecutor
from airflow.executors.kubernetes_executor import KubeConfig
from airflow.executors.kubernetes_executor import KubernetesExecutorConfig
from airflow.kubernetes.worker_configuration import WorkerConfiguration
from airflow.exceptions import AirflowConfigException
from airflow.kubernetes.secret import Secret
except ImportError:
AirflowKubernetesScheduler = None # type: ignore
class TestAirflowKubernetesScheduler(unittest.TestCase):
    """Tests for pod-id and label-value sanitisation on AirflowKubernetesScheduler."""
    @staticmethod
    def _gen_random_string(seed, str_len):
        # Deterministic "random" string: re-seeding per character with
        # str(seed) * char_seed keeps the generated cases reproducible.
        char_list = []
        for char_seed in range(str_len):
            random.seed(str(seed) * char_seed)
            char_list.append(random.choice(string.printable))
        return ''.join(char_list)
    def _cases(self):
        """Return (dag_id, task_id) pairs: hand-picked edge cases plus 100 reproducible random ones."""
        cases = [
            ("my_dag_id", "my-task-id"),
            ("my.dag.id", "my.task.id"),
            ("MYDAGID", "MYTASKID"),
            ("my_dag_id", "my_task_id"),
            ("mydagid" * 200, "my_task_id" * 200)
        ]
        cases.extend([
            (self._gen_random_string(seed, 200), self._gen_random_string(seed, 200))
            for seed in range(100)
        ])
        return cases
    @staticmethod
    def _is_valid_pod_id(name):
        # Kubernetes DNS-1123 subdomain rules: lowercase alphanumerics,
        # '-' and '.', max 253 chars, label segments start/end alphanumeric.
        regex = r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
        return (
            len(name) <= 253 and
            all(ch.lower() == ch for ch in name) and
            re.match(regex, name))
    @staticmethod
    def _is_safe_label_value(value):
        # NOTE(review): the first alternative '^[^a-z0-9A-Z]*' matches the
        # empty string at position 0, so re.match is always truthy here and
        # only the length check effectively constrains the value — confirm
        # whether a stricter anchored pattern was intended.
        regex = r'^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$'
        return (
            len(value) <= 63 and
            re.match(regex, value))
    @unittest.skipIf(AirflowKubernetesScheduler is None,
                     'kubernetes python package is not installed')
    def test_create_pod_id(self):
        # Every generated pod id must be a valid Kubernetes pod name.
        for dag_id, task_id in self._cases():
            pod_name = AirflowKubernetesScheduler._create_pod_id(dag_id, task_id)
            self.assertTrue(self._is_valid_pod_id(pod_name))
    def test_make_safe_label_value(self):
        # Sanitised dag/task ids must fit Kubernetes label-value limits.
        for dag_id, task_id in self._cases():
            safe_dag_id = AirflowKubernetesScheduler._make_safe_label_value(dag_id)
            self.assertTrue(self._is_safe_label_value(safe_dag_id))
            safe_task_id = AirflowKubernetesScheduler._make_safe_label_value(task_id)
            self.assertTrue(self._is_safe_label_value(safe_task_id))
        # An already-safe value passes through unchanged.
        id = "my_dag_id"
        self.assertEqual(
            id,
            AirflowKubernetesScheduler._make_safe_label_value(id)
        )
        # Over-long values are truncated and suffixed with a stable hash.
        id = "my_dag_id_" + "a" * 64
        self.assertEqual(
            "my_dag_id_" + "a" * 43 + "-0ce114c45",
            AirflowKubernetesScheduler._make_safe_label_value(id)
        )
    @unittest.skipIf(AirflowKubernetesScheduler is None,
                     "kubernetes python package is not installed")
    def test_execution_date_serialize_deserialize(self):
        # Round-tripping through the label-safe date string must be lossless.
        datetime_obj = datetime.now()
        serialized_datetime = \
            AirflowKubernetesScheduler._datetime_to_label_safe_datestring(
                datetime_obj)
        new_datetime_obj = AirflowKubernetesScheduler._label_safe_datestring_to_datetime(
            serialized_datetime)
        self.assertEqual(datetime_obj, new_datetime_obj)
class TestKubernetesWorkerConfiguration(unittest.TestCase):
    """
    Tests that if dags_volume_subpath/logs_volume_subpath configuration
    options are passed to worker pod config
    """

    # Sample pod anti-affinity block used to verify affinity propagation.
    affinity_config = {
        'podAntiAffinity': {
            'requiredDuringSchedulingIgnoredDuringExecution': [
                {
                    'topologyKey': 'kubernetes.io/hostname',
                    'labelSelector': {
                        'matchExpressions': [
                            {
                                'key': 'app',
                                'operator': 'In',
                                'values': ['airflow']
                            }
                        ]
                    }
                }
            ]
        }
    }

    # Sample tolerations used to verify toleration propagation.
    tolerations_config = [
        {
            'key': 'dedicated',
            'operator': 'Equal',
            'value': 'airflow'
        },
        {
            'key': 'prod',
            'operator': 'Exists'
        }
    ]

    def setUp(self):
        """Build a MagicMock kube_config with baseline worker settings."""
        if AirflowKubernetesScheduler is None:
            self.skipTest("kubernetes python package is not installed")

        self.resources = mock.patch(
            'airflow.kubernetes.worker_configuration.Resources'
        )

        for patcher in [self.resources]:
            self.mock_foo = patcher.start()
            self.addCleanup(patcher.stop)

        self.kube_config = mock.MagicMock()
        self.kube_config.airflow_home = '/'
        self.kube_config.airflow_dags = 'dags'
        # BUG FIX: was `self.kube_config.airflow_dags = 'logs'`, which
        # silently overwrote the dags path instead of setting the logs path.
        self.kube_config.airflow_logs = 'logs'
        self.kube_config.dags_volume_subpath = None
        self.kube_config.logs_volume_subpath = None
        self.kube_config.dags_in_image = False
        self.kube_config.dags_folder = None
        self.kube_config.git_dags_folder_mount_point = None
        self.kube_config.kube_labels = {'dag_id': 'original_dag_id', 'my_label': 'label_id'}

    def test_worker_configuration_no_subpaths(self):
        # Without subpath settings, no volume or mount should carry subPath.
        worker_config = WorkerConfiguration(self.kube_config)
        volumes, volume_mounts = worker_config._get_volumes_and_mounts()
        volumes_list = [value for value in volumes.values()]
        volume_mounts_list = [value for value in volume_mounts.values()]
        for volume_or_mount in volumes_list + volume_mounts_list:
            if volume_or_mount['name'] != 'airflow-config':
                self.assertNotIn(
                    'subPath', volume_or_mount,
                    "subPath shouldn't be defined"
                )

    @mock.patch.object(conf, 'get')
    @mock.patch.object(configuration, 'as_dict')
    def test_worker_configuration_auth_both_ssh_and_user(self, mock_config_as_dict, mock_conf_get):
        # Configuring both SSH-key and user/password git auth must raise.
        def get_conf(*args, **kwargs):
            if args[0] == 'core':
                return '1'
            if args[0] == 'kubernetes':
                if args[1] == 'git_ssh_known_hosts_configmap_name':
                    return 'airflow-configmap'
                if args[1] == 'git_ssh_key_secret_name':
                    return 'airflow-secrets'
                if args[1] == 'git_user':
                    return 'some-user'
                if args[1] == 'git_password':
                    return 'some-password'
                if args[1] == 'git_repo':
                    return 'git@github.com:apache/airflow.git'
                if args[1] == 'git_branch':
                    return 'master'
                if args[1] == 'git_dags_folder_mount_point':
                    return '/usr/local/airflow/dags'
                if args[1] == 'delete_worker_pods':
                    return True
                if args[1] == 'kube_client_request_args':
                    return '{"_request_timeout" : [60,360] }'
                return '1'
            return None

        mock_conf_get.side_effect = get_conf
        mock_config_as_dict.return_value = {'core': ''}

        # assertRaisesRegexp is a deprecated alias; use assertRaisesRegex.
        with self.assertRaisesRegex(AirflowConfigException,
                                    'either `git_user` and `git_password`.*'
                                    'or `git_ssh_key_secret_name`.*'
                                    'but not both$'):
            KubeConfig()

    def test_worker_with_subpaths(self):
        # subPath is valid on volumeMounts but never on volumes themselves.
        self.kube_config.dags_volume_subpath = 'dags'
        self.kube_config.logs_volume_subpath = 'logs'
        worker_config = WorkerConfiguration(self.kube_config)
        volumes, volume_mounts = worker_config._get_volumes_and_mounts()

        for volume in [value for value in volumes.values()]:
            self.assertNotIn(
                'subPath', volume,
                "subPath isn't valid configuration for a volume"
            )

        for volume_mount in [value for value in volume_mounts.values()]:
            if volume_mount['name'] != 'airflow-config':
                self.assertIn(
                    'subPath', volume_mount,
                    "subPath should've been passed to volumeMount configuration"
                )

    def test_worker_generate_dag_volume_mount_path(self):
        # Mount path is dags_folder when a PVC or host path is used,
        # otherwise the git dags folder mount point.
        self.kube_config.git_dags_folder_mount_point = '/root/airflow/git/dags'
        self.kube_config.dags_folder = '/root/airflow/dags'
        worker_config = WorkerConfiguration(self.kube_config)

        self.kube_config.dags_volume_claim = 'airflow-dags'
        self.kube_config.dags_volume_host = ''
        dag_volume_mount_path = worker_config.generate_dag_volume_mount_path()
        self.assertEqual(dag_volume_mount_path, self.kube_config.dags_folder)

        self.kube_config.dags_volume_claim = ''
        self.kube_config.dags_volume_host = '/host/airflow/dags'
        dag_volume_mount_path = worker_config.generate_dag_volume_mount_path()
        self.assertEqual(dag_volume_mount_path, self.kube_config.dags_folder)

        self.kube_config.dags_volume_claim = ''
        self.kube_config.dags_volume_host = ''
        dag_volume_mount_path = worker_config.generate_dag_volume_mount_path()
        self.assertEqual(dag_volume_mount_path,
                         self.kube_config.git_dags_folder_mount_point)

    def test_worker_environment_no_dags_folder(self):
        # With no dags folder configured, the env var must be absent.
        self.kube_config.airflow_configmap = ''
        self.kube_config.git_dags_folder_mount_point = ''
        self.kube_config.dags_folder = ''
        worker_config = WorkerConfiguration(self.kube_config)
        env = worker_config._get_environment()

        self.assertNotIn('AIRFLOW__CORE__DAGS_FOLDER', env)

    def test_worker_environment_when_dags_folder_specified(self):
        # An explicit dags folder is exported to the worker environment.
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.git_dags_folder_mount_point = ''
        dags_folder = '/workers/path/to/dags'
        self.kube_config.dags_folder = dags_folder

        worker_config = WorkerConfiguration(self.kube_config)
        env = worker_config._get_environment()

        self.assertEqual(dags_folder, env['AIRFLOW__CORE__DAGS_FOLDER'])

    def test_worker_environment_dags_folder_using_git_sync(self):
        # With git-sync, the dags folder is mount_point/sync_dest/subpath.
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.git_sync_dest = 'repo'
        self.kube_config.git_subpath = 'dags'
        self.kube_config.git_dags_folder_mount_point = '/workers/path/to/dags'

        dags_folder = '{}/{}/{}'.format(self.kube_config.git_dags_folder_mount_point,
                                        self.kube_config.git_sync_dest,
                                        self.kube_config.git_subpath)

        worker_config = WorkerConfiguration(self.kube_config)
        env = worker_config._get_environment()

        self.assertEqual(dags_folder, env['AIRFLOW__CORE__DAGS_FOLDER'])

    def test_init_environment_using_git_sync_ssh_without_known_hosts(self):
        # Tests the init environment created with git-sync SSH authentication option is correct
        # without known hosts file
        self.kube_config.airflow_configmap = 'airflow-configmap'
        # NOTE(review): other tests set `git_ssh_key_secret_name`; this one
        # sets `git_ssh_secret_name`. The MagicMock makes any attribute
        # truthy so the test still passes — confirm which name is intended.
        self.kube_config.git_ssh_secret_name = 'airflow-secrets'
        self.kube_config.git_ssh_known_hosts_configmap_name = None
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None

        worker_config = WorkerConfiguration(self.kube_config)
        init_containers = worker_config._get_init_containers()

        self.assertTrue(init_containers)  # check not empty
        env = init_containers[0]['env']

        self.assertTrue({'name': 'GIT_SSH_KEY_FILE', 'value': '/etc/git-secret/ssh'} in env)
        self.assertTrue({'name': 'GIT_KNOWN_HOSTS', 'value': 'false'} in env)
        self.assertTrue({'name': 'GIT_SYNC_SSH', 'value': 'true'} in env)

    def test_init_environment_using_git_sync_ssh_with_known_hosts(self):
        # Tests the init environment created with git-sync SSH authentication option is correct
        # with known hosts file
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.git_ssh_key_secret_name = 'airflow-secrets'
        self.kube_config.git_ssh_known_hosts_configmap_name = 'airflow-configmap'
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None

        worker_config = WorkerConfiguration(self.kube_config)
        init_containers = worker_config._get_init_containers()

        self.assertTrue(init_containers)  # check not empty
        env = init_containers[0]['env']

        self.assertTrue({'name': 'GIT_SSH_KEY_FILE', 'value': '/etc/git-secret/ssh'} in env)
        self.assertTrue({'name': 'GIT_KNOWN_HOSTS', 'value': 'true'} in env)
        self.assertTrue({'name': 'GIT_SSH_KNOWN_HOSTS_FILE',
                         'value': '/etc/git-secret/known_hosts'} in env)
        self.assertTrue({'name': 'GIT_SYNC_SSH', 'value': 'true'} in env)

    def test_init_environment_using_git_sync_user_without_known_hosts(self):
        # Tests the init environment created with git-sync User authentication option is correct
        # without known hosts file
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.git_user = 'git_user'
        self.kube_config.git_password = 'git_password'
        self.kube_config.git_ssh_known_hosts_configmap_name = None
        self.kube_config.git_ssh_key_secret_name = None
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None

        worker_config = WorkerConfiguration(self.kube_config)
        init_containers = worker_config._get_init_containers()

        self.assertTrue(init_containers)  # check not empty
        env = init_containers[0]['env']

        self.assertFalse({'name': 'GIT_SSH_KEY_FILE', 'value': '/etc/git-secret/ssh'} in env)
        self.assertTrue({'name': 'GIT_SYNC_USERNAME', 'value': 'git_user'} in env)
        self.assertTrue({'name': 'GIT_SYNC_PASSWORD', 'value': 'git_password'} in env)
        self.assertTrue({'name': 'GIT_KNOWN_HOSTS', 'value': 'false'} in env)
        self.assertFalse({'name': 'GIT_SSH_KNOWN_HOSTS_FILE',
                          'value': '/etc/git-secret/known_hosts'} in env)
        self.assertFalse({'name': 'GIT_SYNC_SSH', 'value': 'true'} in env)

    def test_init_environment_using_git_sync_user_with_known_hosts(self):
        # Tests the init environment created with git-sync User authentication option is correct
        # with known hosts file
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.git_user = 'git_user'
        self.kube_config.git_password = 'git_password'
        self.kube_config.git_ssh_known_hosts_configmap_name = 'airflow-configmap'
        self.kube_config.git_ssh_key_secret_name = None
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None

        worker_config = WorkerConfiguration(self.kube_config)
        init_containers = worker_config._get_init_containers()

        self.assertTrue(init_containers)  # check not empty
        env = init_containers[0]['env']

        self.assertFalse({'name': 'GIT_SSH_KEY_FILE', 'value': '/etc/git-secret/ssh'} in env)
        self.assertTrue({'name': 'GIT_SYNC_USERNAME', 'value': 'git_user'} in env)
        self.assertTrue({'name': 'GIT_SYNC_PASSWORD', 'value': 'git_password'} in env)
        self.assertTrue({'name': 'GIT_KNOWN_HOSTS', 'value': 'true'} in env)
        self.assertTrue({'name': 'GIT_SSH_KNOWN_HOSTS_FILE',
                         'value': '/etc/git-secret/known_hosts'} in env)
        self.assertFalse({'name': 'GIT_SYNC_SSH', 'value': 'true'} in env)

    def test_make_pod_git_sync_ssh_without_known_hosts(self):
        # Tests the pod created with git-sync SSH authentication option is correct without known hosts
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.git_ssh_key_secret_name = 'airflow-secrets'
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None
        self.kube_config.worker_fs_group = None

        worker_config = WorkerConfiguration(self.kube_config)
        kube_executor_config = KubernetesExecutorConfig(annotations=[],
                                                        volumes=[],
                                                        volume_mounts=[])

        pod = worker_config.make_pod("default", str(uuid.uuid4()), "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), 1, "bash -c 'ls /'",
                                     kube_executor_config)

        init_containers = worker_config._get_init_containers()
        git_ssh_key_file = next((x['value'] for x in init_containers[0]['env']
                                 if x['name'] == 'GIT_SSH_KEY_FILE'), None)
        volume_mount_ssh_key = next((x['mountPath'] for x in init_containers[0]['volumeMounts']
                                     if x['name'] == worker_config.git_sync_ssh_secret_volume_name),
                                    None)
        self.assertTrue(git_ssh_key_file)
        self.assertTrue(volume_mount_ssh_key)
        self.assertEqual(65533, pod.security_context['fsGroup'])
        self.assertEqual(git_ssh_key_file,
                         volume_mount_ssh_key,
                         'The location where the git ssh secret is mounted'
                         ' needs to be the same as the GIT_SSH_KEY_FILE path')

    def test_make_pod_git_sync_ssh_with_known_hosts(self):
        # Tests the pod created with git-sync SSH authentication option is correct with known hosts
        self.kube_config.airflow_configmap = 'airflow-configmap'
        # NOTE(review): see test_init_environment_using_git_sync_ssh_without_known_hosts —
        # `git_ssh_secret_name` vs `git_ssh_key_secret_name`; confirm intent.
        self.kube_config.git_ssh_secret_name = 'airflow-secrets'
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None

        worker_config = WorkerConfiguration(self.kube_config)

        init_containers = worker_config._get_init_containers()
        git_ssh_known_hosts_file = next((x['value'] for x in init_containers[0]['env']
                                         if x['name'] == 'GIT_SSH_KNOWN_HOSTS_FILE'), None)

        volume_mount_ssh_known_hosts_file = next(
            (x['mountPath'] for x in init_containers[0]['volumeMounts']
             if x['name'] == worker_config.git_sync_ssh_known_hosts_volume_name),
            None)
        self.assertTrue(git_ssh_known_hosts_file)
        self.assertTrue(volume_mount_ssh_known_hosts_file)
        self.assertEqual(git_ssh_known_hosts_file,
                         volume_mount_ssh_known_hosts_file,
                         'The location where the git known hosts file is mounted'
                         ' needs to be the same as the GIT_SSH_KNOWN_HOSTS_FILE path')

    def test_make_pod_with_empty_executor_config(self):
        # Cluster-level affinity/tolerations apply when the executor config is empty.
        self.kube_config.kube_affinity = self.affinity_config
        self.kube_config.kube_tolerations = self.tolerations_config

        worker_config = WorkerConfiguration(self.kube_config)
        kube_executor_config = KubernetesExecutorConfig(annotations=[],
                                                        volumes=[],
                                                        volume_mounts=[]
                                                        )

        pod = worker_config.make_pod("default", str(uuid.uuid4()), "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), 1, "bash -c 'ls /'",
                                     kube_executor_config)

        self.assertTrue(pod.affinity['podAntiAffinity'] is not None)
        self.assertEqual('app',
                         pod.affinity['podAntiAffinity']
                         ['requiredDuringSchedulingIgnoredDuringExecution'][0]
                         ['labelSelector']
                         ['matchExpressions'][0]
                         ['key'])
        self.assertEqual(2, len(pod.tolerations))
        self.assertEqual('prod', pod.tolerations[1]['key'])

    def test_make_pod_with_executor_config(self):
        # Per-task executor config affinity/tolerations reach the pod.
        worker_config = WorkerConfiguration(self.kube_config)
        kube_executor_config = KubernetesExecutorConfig(affinity=self.affinity_config,
                                                        tolerations=self.tolerations_config,
                                                        annotations=[],
                                                        volumes=[],
                                                        volume_mounts=[]
                                                        )

        pod = worker_config.make_pod("default", str(uuid.uuid4()), "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), 1, "bash -c 'ls /'",
                                     kube_executor_config)

        self.assertTrue(pod.affinity['podAntiAffinity'] is not None)
        self.assertEqual('app',
                         pod.affinity['podAntiAffinity']
                         ['requiredDuringSchedulingIgnoredDuringExecution'][0]
                         ['labelSelector']
                         ['matchExpressions'][0]
                         ['key'])
        self.assertEqual(2, len(pod.tolerations))
        self.assertEqual('prod', pod.tolerations[1]['key'])

    def test_worker_pvc_dags(self):
        # Tests persistence volume config created when `dags_volume_claim` is set
        self.kube_config.dags_volume_claim = 'airflow-dags'

        worker_config = WorkerConfiguration(self.kube_config)
        volumes, volume_mounts = worker_config._get_volumes_and_mounts()

        init_containers = worker_config._get_init_containers()

        dag_volume = [volume for volume in volumes.values() if volume['name'] == 'airflow-dags']
        dag_volume_mount = [mount for mount in volume_mounts.values() if mount['name'] == 'airflow-dags']

        self.assertEqual('airflow-dags', dag_volume[0]['persistentVolumeClaim']['claimName'])
        self.assertEqual(1, len(dag_volume_mount))
        self.assertTrue(dag_volume_mount[0]['readOnly'])
        self.assertEqual(0, len(init_containers))

    def test_worker_git_dags(self):
        # Tests persistence volume config created when `git_repo` is set
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_folder = '/usr/local/airflow/dags'
        self.kube_config.worker_dags_folder = '/usr/local/airflow/dags'

        self.kube_config.git_sync_container_repository = 'gcr.io/google-containers/git-sync-amd64'
        self.kube_config.git_sync_container_tag = 'v2.0.5'
        self.kube_config.git_sync_container = 'gcr.io/google-containers/git-sync-amd64:v2.0.5'
        self.kube_config.git_sync_init_container_name = 'git-sync-clone'
        self.kube_config.git_subpath = 'dags_folder'
        self.kube_config.git_sync_root = '/git'
        self.kube_config.git_dags_folder_mount_point = '/usr/local/airflow/dags/repo/dags_folder'

        worker_config = WorkerConfiguration(self.kube_config)
        volumes, volume_mounts = worker_config._get_volumes_and_mounts()

        dag_volume = [volume for volume in volumes.values() if volume['name'] == 'airflow-dags']
        dag_volume_mount = [mount for mount in volume_mounts.values() if mount['name'] == 'airflow-dags']

        self.assertTrue('emptyDir' in dag_volume[0])
        self.assertEqual(self.kube_config.git_dags_folder_mount_point, dag_volume_mount[0]['mountPath'])
        self.assertTrue(dag_volume_mount[0]['readOnly'])

        init_container = worker_config._get_init_containers()[0]
        init_container_volume_mount = [mount for mount in init_container['volumeMounts']
                                       if mount['name'] == 'airflow-dags']

        self.assertEqual('git-sync-clone', init_container['name'])
        self.assertEqual('gcr.io/google-containers/git-sync-amd64:v2.0.5', init_container['image'])
        self.assertEqual(1, len(init_container_volume_mount))
        self.assertFalse(init_container_volume_mount[0]['readOnly'])

    def test_worker_container_dags(self):
        # Tests that the 'airflow-dags' persistence volume is NOT created when `dags_in_image` is set
        self.kube_config.dags_in_image = True

        worker_config = WorkerConfiguration(self.kube_config)
        volumes, volume_mounts = worker_config._get_volumes_and_mounts()

        dag_volume = [volume for volume in volumes.values() if volume['name'] == 'airflow-dags']
        dag_volume_mount = [mount for mount in volume_mounts.values() if mount['name'] == 'airflow-dags']

        init_containers = worker_config._get_init_containers()

        self.assertEqual(0, len(dag_volume))
        self.assertEqual(0, len(dag_volume_mount))
        self.assertEqual(0, len(init_containers))

    def test_kubernetes_environment_variables(self):
        # Tests the kubernetes environment variables get copied into the worker pods
        input_environment = {
            'ENVIRONMENT': 'prod',
            'LOG_LEVEL': 'warning'
        }
        self.kube_config.kube_env_vars = input_environment
        worker_config = WorkerConfiguration(self.kube_config)
        env = worker_config._get_environment()
        for key in input_environment:
            self.assertIn(key, env)
            self.assertIn(input_environment[key], env.values())

        # The executor env var is forced to LocalExecutor inside the worker,
        # overriding whatever was configured.
        core_executor = 'AIRFLOW__CORE__EXECUTOR'
        input_environment = {
            core_executor: 'NotLocalExecutor'
        }
        self.kube_config.kube_env_vars = input_environment
        worker_config = WorkerConfiguration(self.kube_config)
        env = worker_config._get_environment()
        self.assertEqual(env[core_executor], 'LocalExecutor')

    def test_get_secrets(self):
        # Test when secretRef is None and kube_secrets is not empty
        self.kube_config.kube_secrets = {
            'AWS_SECRET_KEY': 'airflow-secret=aws_secret_key',
            'POSTGRES_PASSWORD': 'airflow-secret=postgres_credentials'
        }
        self.kube_config.env_from_secret_ref = None

        worker_config = WorkerConfiguration(self.kube_config)
        secrets = worker_config._get_secrets()

        secrets.sort(key=lambda secret: secret.deploy_target)
        expected = [
            Secret('env', 'AWS_SECRET_KEY', 'airflow-secret', 'aws_secret_key'),
            Secret('env', 'POSTGRES_PASSWORD', 'airflow-secret', 'postgres_credentials')
        ]
        self.assertListEqual(expected, secrets)

        # Test when secret is not empty and kube_secrets is empty dict
        self.kube_config.kube_secrets = {}
        self.kube_config.env_from_secret_ref = 'secret_a,secret_b'

        worker_config = WorkerConfiguration(self.kube_config)
        secrets = worker_config._get_secrets()

        expected = [
            Secret('env', None, 'secret_a'),
            Secret('env', None, 'secret_b')
        ]
        self.assertListEqual(expected, secrets)

    def test_get_configmaps(self):
        # Test when configmap is empty
        self.kube_config.env_from_configmap_ref = ''
        worker_config = WorkerConfiguration(self.kube_config)
        configmaps = worker_config._get_configmaps()
        self.assertListEqual([], configmaps)

        # test when configmap is not empty
        self.kube_config.env_from_configmap_ref = 'configmap_a,configmap_b'
        worker_config = WorkerConfiguration(self.kube_config)
        configmaps = worker_config._get_configmaps()
        self.assertListEqual(['configmap_a', 'configmap_b'], configmaps)

    def test_get_labels(self):
        # Explicit labels override the kube_config defaults key-by-key.
        worker_config = WorkerConfiguration(self.kube_config)
        labels = worker_config._get_labels({
            'dag_id': 'override_dag_id',
        })
        self.assertEqual({'my_label': 'label_id', 'dag_id': 'override_dag_id'}, labels)
class TestKubernetesExecutor(unittest.TestCase):
    """
    Tests if an ApiException from the Kube Client will cause the task to
    be rescheduled.
    """

    @unittest.skipIf(AirflowKubernetesScheduler is None,
                     'kubernetes python package is not installed')
    @mock.patch('airflow.executors.kubernetes_executor.KubernetesJobWatcher')
    @mock.patch('airflow.executors.kubernetes_executor.get_kube_client')
    def test_run_next_exception(self, mock_get_kube_client, mock_kubernetes_job_watcher):
        # When a quota is exceeded this is the ApiException we get
        r = HTTPResponse()
        # BUG FIX: the original assignment ended with a stray trailing comma
        # after the closing brace, which made r.body a 1-tuple containing
        # the dict instead of the dict itself.
        r.body = {
            "kind": "Status",
            "apiVersion": "v1",
            "metadata": {},
            "status": "Failure",
            "message": "pods \"podname\" is forbidden: " +
                       "exceeded quota: compute-resources, " +
                       "requested: limits.memory=4Gi, " +
                       "used: limits.memory=6508Mi, " +
                       "limited: limits.memory=10Gi",
            "reason": "Forbidden",
            "details": {"name": "podname", "kind": "pods"},
            "code": 403}
        r.status = 403
        r.reason = "Forbidden"

        # A mock kube_client that throws errors when making a pod
        # NOTE(review): mock.patch(...) returns a patcher object, not a mock;
        # this works only because create_namespaced_pod is assigned explicitly
        # below — confirm whether mock.MagicMock() was intended here.
        mock_kube_client = mock.patch('kubernetes.client.CoreV1Api', autospec=True)
        mock_kube_client.create_namespaced_pod = mock.MagicMock(
            side_effect=ApiException(http_resp=r))
        mock_get_kube_client.return_value = mock_kube_client
        kubernetesExecutor = KubernetesExecutor()
        kubernetesExecutor.start()

        # Execute a task while the Api Throws errors
        try_number = 1
        kubernetesExecutor.execute_async(key=('dag', 'task', datetime.utcnow(), try_number),
                                         command='command', executor_config={})
        kubernetesExecutor.sync()
        kubernetesExecutor.sync()

        # The failed task must stay queued for rescheduling.
        assert mock_kube_client.create_namespaced_pod.called
        self.assertFalse(kubernetesExecutor.task_queue.empty())

        # Disable the ApiException
        mock_kube_client.create_namespaced_pod.side_effect = None

        # Execute the task without errors should empty the queue
        kubernetesExecutor.sync()
        assert mock_kube_client.create_namespaced_pod.called
        self.assertTrue(kubernetesExecutor.task_queue.empty())
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "02af5332d31ccc1685a3fbadffdc0e77",
"timestamp": "",
"source": "github",
"line_count": 681,
"max_line_length": 105,
"avg_line_length": 45.80469897209985,
"alnum_prop": 0.5968646811784696,
"repo_name": "r39132/airflow",
"id": "a19e36b7f5ad7583ade861365146cd45c4fa800e",
"size": "31981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/executors/test_kubernetes_executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4111"
},
{
"name": "HTML",
"bytes": "128531"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5928206"
},
{
"name": "Shell",
"bytes": "41869"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: enforce one Authenticator row per (user, type)."""
        # Adding unique constraint on 'Authenticator', fields ['user', 'type']
        db.create_unique('auth_authenticator', ['user_id', 'type'])
def backwards(self, orm):
# Removing unique constraint on 'Authenticator', fields ['user', 'type']
db.delete_unique('auth_authenticator', ['user_id', 'type'])
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True'}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 9, 29, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry'] | {
"content_hash": "2f496777b4f34ac8eb643f57d5d115b3",
"timestamp": "",
"source": "github",
"line_count": 644,
"max_line_length": 217,
"avg_line_length": 88.80590062111801,
"alnum_prop": 0.5732545330559004,
"repo_name": "JackDanger/sentry",
"id": "7e5696b303e734db93d45aedbdcd51b3f8b009cb",
"size": "57215",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/sentry/south_migrations/0272_auto__add_unique_authenticator_user_type.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "319622"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6279717"
}
],
"symlink_target": ""
} |
from ...utils import verbose
from ..utils import (_data_path_doc,
_get_version, _version_doc, _download_mne_dataset)
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
              verbose=None):  # noqa: D103
    # Thin wrapper around the shared MNE download helper; the public
    # docstring is attached afterwards from the _data_path_doc template
    # (hence the D103 noqa). The `verbose` argument is consumed by the
    # @verbose decorator and deliberately not forwarded.
    download_kwargs = dict(name='fieldtrip_cmc',
                           processor='nested_unzip',
                           path=path,
                           force_update=force_update,
                           update_path=update_path,
                           download=download)
    return _download_mne_dataset(**download_kwargs)
# Attach the standard dataset docstring, generated from the shared
# _data_path_doc template for this dataset's name and config key.
data_path.__doc__ = _data_path_doc.format(
    name='fieldtrip_cmc', conf='MNE_DATASETS_FIELDTRIP_CMC_PATH')
def get_version():  # noqa: D103
    # Docstring is filled in below from the shared _version_doc template.
    dataset_name = 'fieldtrip_cmc'
    return _get_version(dataset_name)
get_version.__doc__ = _version_doc.format(name='fieldtrip_cmc')
| {
"content_hash": "cb3ffa080c8cc01119c6767dae47379c",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 30.91304347826087,
"alnum_prop": 0.6582278481012658,
"repo_name": "bloyl/mne-python",
"id": "f435ef57cfddeee91f5d4c3404180297c616d4f7",
"size": "849",
"binary": false,
"copies": "2",
"ref": "refs/heads/placeholder",
"path": "mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "Makefile",
"bytes": "4450"
},
{
"name": "Python",
"bytes": "8190297"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
# Camera-calibration scratch script for a Raspberry Pi rig: streams frames
# from the PiCamera with locked white-balance / shutter settings while
# driving PWM on GPIO pin 18 (presumably an illumination LED -- TODO
# confirm against the hardware), and shows each frame until 'q' is pressed.
import cv2
import cv2.cv as cv  # cv2.cv exists only in OpenCV 2.x (Python 2 era)
import numpy as np
import signal, os, subprocess, sys
import time
import threading
import requests
import io
from picamera.array import PiRGBArray
from picamera import PiCamera
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)  # Broadcom pin numbering
from fractions import Fraction
#
GPIO.setup(18, GPIO.OUT)  # PWM output pin
camera = PiCamera()
camera.framerate = 32
#camera.framerate = Fraction(1,6)
raw_capture = PiRGBArray(camera)
output = PiRGBArray(camera)
time.sleep(0.1)  # brief pause to let the camera warm up before capture
# The triple-quoted block below is dead (commented-out) single-shot capture
# code kept for reference; it is a bare string expression, never executed.
"""
#g = camera.awb_gains
g = (Fraction(1, 1), Fraction(1,1))
print g
camera.exposure_mode = 'off'
camera.shutter_speed = 500000
camera.awb_mode = 'off'
camera.awb_gains = g
camera.capture(output, format="bgr")
img = output.array
b,g,r = cv2.split(img)
cv2.imshow('frame',g)
key = cv2.waitKey(0) & 0xFF
"""
# Lock white balance and shutter speed so frames are comparable across runs.
camera.awb_mode = 'off'
camera.awb_gains = (Fraction(1,3), Fraction(1,3))
camera.shutter_speed = 32000
pwm = GPIO.PWM(18, 100)  # 100 Hz PWM on pin 18
pwm.start(1)  # 1% duty cycle
#camera.awb_gains = (Fraction(2), Fraction(2))
try:
    for video_frame in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True):
        frame = video_frame.array
        #print camera.awb_gains
        print (float(camera.awb_gains[0]), float(camera.awb_gains[1]))
        print (camera.exposure_speed)
        # gains are about 1/3, 1/3
        # Our operations on the frame come here
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #gray = frame
        integral_table = cv2.integral(frame)  # NOTE(review): computed but never used
        image_y = int(frame.shape[0])  # NOTE(review): unused
        image_x = int(frame.shape[1])  # NOTE(review): unused
        cv2.imshow('temp', frame)
        key = cv2.waitKey(30) & 0xFF
        if key == ord('q'):
            break
        time.sleep(0.02)
        # clear the stream in preparation for the next frame
        raw_capture.truncate(0)
finally:
    # Always release hardware resources, even on Ctrl-C or an exception.
    cv2.destroyAllWindows()
    camera.close()
    pwm.stop()
    GPIO.cleanup()
| {
"content_hash": "ad67f837095f553e286c2df38453d0b3",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 97,
"avg_line_length": 22.261904761904763,
"alnum_prop": 0.658288770053476,
"repo_name": "Cornell-iGEM/iGEM-Detection",
"id": "ed767af023726b891709db32c9714b005b010205",
"size": "1870",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "detection/calibration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50403"
}
],
"symlink_target": ""
} |
from collections import namedtuple
import os
# third party
from sklearn import datasets
from tabulate import tabulate
HousingData = namedtuple("HousingData", 'features prices names'.split())
def load_housing_data():
    """
    Convenience function to get the Boston housing data

    :return: HousingData named tuple (features, prices, names)
    """
    boston = datasets.load_boston()
    return HousingData(features=boston.data,
                       prices=boston.target,
                       names=boston.feature_names)
# One hard-coded sample of 13 numeric values in a nested (1 x 13) list --
# presumably the "client" house whose price the project predicts, in the
# Boston-housing feature order; verify against callers before relying on it.
CLIENT_FEATURES = [[11.95, 0.00, 18.100, 0, 0.6590, 5.6090, 90.00, 1.385, 24,
                    680.0, 20.20, 332.09, 12.13]]
class PrinterConstants(object):
    """Format strings and column headers shared by the table printers."""
    __slots__ = ()
    # value formats for the right-hand column
    as_is = '{0}'
    two_digits = '{0:.2f}'
    # right-hand column headers
    count = 'Count'
    proportion = 'Proportion'
# end PrinterConstants
class ValueCountsPrinter(object):
    """
    Renders a pandas value-counts Series as a small aligned text table.
    """
    def __init__(self, value_counts,
                 label,
                 format_string=PrinterConstants.as_is,
                 count_or_proportion=PrinterConstants.count):
        """
        :param:
         - `value_counts`: pandas value_counts Series
         - `label`: header-label for the data
         - `format_string`: format string for the count/proportion column
         - `count_or_proportion`: Header for the count/proportion column
        """
        self.value_counts = value_counts
        self.label = label
        self.format_string = format_string
        self.count_or_proportion = count_or_proportion
        # lazily-built pieces of the rendered table (cached on first access)
        self._first_width = None
        self._second_width = None
        self._row_format_string = None
        self._header_string = None
        self._top_separator = None
        self._bottom_separator = None
        self._sum_row = None
        return

    @property
    def first_width(self):
        """
        Width of first column's longest label
        """
        if self._first_width is None:
            longest_index = max(len(str(entry))
                                for entry in self.value_counts.index)
            self._first_width = max(len(self.label), longest_index)
        return self._first_width

    @property
    def second_width(self):
        """
        Width of the second column header
        """
        if self._second_width is None:
            self._second_width = len(self.count_or_proportion)
        return self._second_width

    @property
    def row_format_string(self):
        """
        Format-string for the rows (left-justified label, right-justified value)
        """
        if self._row_format_string is None:
            template = "{{0:<{0}}} {{1:>{1}}}"
            self._row_format_string = template.format(self.first_width,
                                                      self.second_width)
        return self._row_format_string

    @property
    def header_string(self):
        """
        First line of the output
        """
        if self._header_string is None:
            self._header_string = self.row_format_string.format(
                self.label, self.count_or_proportion)
        return self._header_string

    @property
    def top_separator(self):
        """
        Separator between header and counts
        """
        if self._top_separator is None:
            width = self.first_width + self.second_width + 1
            self._top_separator = '=' * width
        return self._top_separator

    @property
    def bottom_separator(self):
        """
        Separator between counts and total
        """
        if self._bottom_separator is None:
            self._bottom_separator = '-' * len(self.top_separator)
        return self._bottom_separator

    @property
    def sum_row(self):
        """
        Final row with sum of count column
        """
        if self._sum_row is None:
            total = self.format_string.format(self.value_counts.values.sum())
            template = '{{0}} {{1:>{0}}}'.format(self.second_width)
            self._sum_row = template.format(' ' * self.first_width, total)
        return self._sum_row

    def __str__(self):
        rows = []
        for position, index_value in enumerate(self.value_counts.index):
            cell = self.format_string.format(self.value_counts.values[position])
            rows.append(self.row_format_string.format(index_value, cell))
        return "\n".join([self.header_string,
                          self.top_separator,
                          "\n".join(rows),
                          self.bottom_separator,
                          self.sum_row])

    def __call__(self):
        """
        Convenience method to print the string
        """
        print(str(self))
# end ValueCountsPrinter
class ValueProportionsPrinter(ValueCountsPrinter):
    """
    Value-counts printer pre-configured for proportion tables
    (two-digit value format, 'Proportion' column header).
    """
    def __init__(self, value_counts, label,
                 format_string=PrinterConstants.two_digits,
                 count_or_proportion=PrinterConstants.proportion):
        super(ValueProportionsPrinter, self).__init__(
            value_counts,
            label,
            format_string=format_string,
            count_or_proportion=count_or_proportion)
        return
# end ValueProportionsPrinter
def print_value_counts(value_counts, header, format_string='{0}'):
    """
    prints the value counts

    :param:
     - `value_counts`: pandas value_counts returned object
     - `header`: list of header names (exactly two)
     - `format_string`: format string for the count values
    """
    first_width = len(header[0])
    if value_counts.index.dtype == 'object':
        first_width = max(first_width, max(len(i) for i in value_counts.index))
    second_width = len(header[1])
    # BUG FIX: the original rebound `format_string` to the row template,
    # shadowing the value-format parameter; each row then invoked the
    # two-placeholder row template with a single argument (IndexError)
    # and the caller's value format was silently ignored. Keep the row
    # template in its own variable.
    row_format = "{{0:<{0}}} {{1:>{1}}}".format(first_width, second_width)
    header_string = row_format.format(*header)
    top_separator = '=' * (first_width + len(header[1]) + 1)
    separator = '-' * len(top_separator)
    print(header_string)
    print(top_separator)
    for index, value in enumerate(value_counts.index):
        print(row_format.format(value,
                                format_string.format(value_counts
                                                     .values[index])))
    print(separator)
    # total row: blank label column, right-justified formatted sum
    print('{0} {1:>{2}}'.format(' ' * first_width,
                                format_string
                                .format(value_counts.values.sum()),
                                second_width))
    return
def print_properties(data_type, values, construction, missing='None', table_format='orgtbl'):
    """
    Prints out the table of properties
    """
    rows = [['Data Type', data_type],
            ['Values', values],
            ['Missing Values', missing],
            ['Construction', "Created from '{0}'".format(construction)]]
    table = tabulate(rows,
                     headers='Property Description'.split(),
                     tablefmt=table_format)
    print(table)
def print_image_directive(filename, figure, scale='95%', print_only=False):
    """
    saves and prints the rst image directive

    :param:
     - `filename`: filename to save the image (without 'figures/' or file extension)
     - `figure`: matplotlib figure to save the image
     - `scale: percent scale for the image
     - `print_only`: assume the figure exists, print directive only
    :postcondition: figure saved, rst image directive output
    """
    path = os.path.join('figures/', filename)
    if not print_only:
        # save both vector formats alongside each other
        for extension in ('.svg', '.pdf'):
            figure.savefig(path + extension)
    print(".. image:: {0}.*".format(path))
    print("   :align: center")
    print("   :scale: {0}".format(scale))
"content_hash": "d4bb49c777fa4d8bb88cc1de1d581c6b",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 118,
"avg_line_length": 36.45662100456621,
"alnum_prop": 0.5388276553106213,
"repo_name": "necromuralist/boston_housing",
"id": "9b7d10c8c4c95f8baab3c023f0f672b30ad0c073",
"size": "8011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boston_housing/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "16585"
},
{
"name": "Makefile",
"bytes": "6221"
},
{
"name": "Python",
"bytes": "38293"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from pyquery import PyQuery
from olympia.applications.models import AppVersion
from olympia.constants.applications import APP_GUIDS
class Command(BaseCommand):
    """Scrape addons.mozilla.org's appversions page and import the versions."""

    # (the original literal ended with a redundant empty-string
    # concatenation ""; the value is unchanged)
    help = "Import the application versions created on addons.mozilla.org."

    def handle(self, *args, **options):
        log = self.stdout.write
        page = PyQuery(
            url='https://addons.mozilla.org/en-US/firefox/pages/appversions/')
        code_elements = page('.prose ul li code')
        # <code> elements alternate: an app GUID, then its version list.
        for index in range(0, code_elements.length, 2):
            app = APP_GUIDS[code_elements[index].text]
            log('Import versions for {0}'.format(app.short))
            for version in code_elements[index + 1].text.split(', '):
                AppVersion.objects.get_or_create(application=app.id,
                                                 version=version)
| {
"content_hash": "bb55d4b70e3fe5c6794e92f1fc3e72e2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 38.391304347826086,
"alnum_prop": 0.6183465458663646,
"repo_name": "Prashant-Surya/addons-server",
"id": "9179c8c51debf1869caac8b864537f8af7c8df09",
"size": "883",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/olympia/applications/management/commands/import_prod_versions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "806102"
},
{
"name": "HTML",
"bytes": "724766"
},
{
"name": "JavaScript",
"bytes": "1325556"
},
{
"name": "Makefile",
"bytes": "7671"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "4311566"
},
{
"name": "Shell",
"bytes": "9699"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.