"""Driver sets up the algorithm and problem space, sets up an interactive loop
for user input and provides output.
"""
import sys
import time
import resource # TODO
import random
#from algorithms.bfs import Bfs
from problems.tictactoe import TicTacToe
"""Handle user input and return the selected algorithm and problem."""
def handle_input():
algorithms = {
#"bfs": Bfs, # Breadth-First Search
}
problems = {
"tictactoe": TicTacToe,
}
if len(sys.argv) != 3:
print("Invalid usage. Please refer to README.md")
sys.exit(2)
else:
#return algorithms[sys.argv[1]], problems[sys.argv[2]]
return None, problems[sys.argv[2]]
def play(puzzle, strategy):
"""Run the game loop: alternate turns until a winner or a draw is decided."""
start(puzzle)
while puzzle.winner() is None:
next_turn(puzzle, strategy)
end(puzzle)
def start(puzzle):
"""Print the board and randomly decide whether the player or the AI goes first."""
puzzle.print()
puzzle.current_player = random.randint(0, 1)
if (puzzle.current_player == 0):
print("You go first!")
else:
print("The AI will go first.")
time.sleep(2)
def next_turn(puzzle, strategy):
"""Play one turn (human input or AI search), then switch the current player."""
puzzle.print()
if (puzzle.current_player == 0):
puzzle.player_move()
else:
puzzle.set_state(strategy.search())
puzzle.current_player = 1 - puzzle.current_player
def end(puzzle):
"""Print the final board and announce the result."""
puzzle.print()
if (puzzle.winner() == 0):
print("Congratulations, you win!")
elif (puzzle.winner() == 1):
print("The AI has won.")
else:
print("The game is a draw.")
"""Write the results out to a file."""
def write_out(results):
output_file = open("./output.txt", "w")
keys = list(results.keys())
keys.sort()
for field in keys:
format_string = {
int: "{}: {:,}\n",
float: "{}: {:2f}\n",
str: "{}: {}\n",
list: "{}: {}\n"
}[type(results[field])]
output_file.write(format_string.format(field, results[field]))
output_file.close()
print("Process completed. Results in output.txt")
"""Set up the game with the strategy and play until complete, then report."""
if __name__ == '__main__':
algorithm, puzzle = handle_input()
game = puzzle()
strategy = algorithm(game)
play(game, strategy)
write_out(strategy.results())
| {
"content_hash": "dc712434b084852c3f559775479bee7e",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 24.903225806451612,
"alnum_prop": 0.5993091537132987,
"repo_name": "PhilipCastiglione/learning-machines",
"id": "76eb9289001f001dd2e4011b66c2ff318523bbfa",
"size": "2316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adversarial_search/driver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1099"
},
{
"name": "C++",
"bytes": "32358"
},
{
"name": "Matlab",
"bytes": "189456"
},
{
"name": "Python",
"bytes": "100140"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
} |
import re
from .product import Product
class ProductCreator(object):
__lady_blouse_regex = re.compile('http:\/\/(i[12]\.ztat\.net[a-zA-Z\/0-9-@]*\.?[a-zA-Z0-9]*?\.jpg) (?:([a-zA-Z&\u00fc\. ]*) - ([a-zA-Z ]*)|([a-zA-Z&\u00fc\.\s]*) (Bluse))\s-\s([a-zA-Z\u00df\-\/ ]*) ([0-9, ]*) \u20ac ? (?:[0-9]{2}(?: cm) ){1,2}(?:bei|in) Gr\u00f6\u00dfe (?:(?:EU )?[0-9]{1,2}|[A-Z]{1,3}) ([a-zA-Z -]*) ((?:[0-9]{1,3}% [a-zA-Z]*(?:, )?)+)')
__gentleman_trouser_regex = re.compile('http:\/\/(i[12]\.ztat\.net[a-zA-Z\/0-9-@]*\.?[0-9]?\.jpg) ([\w\s&Üéö!]*)(?: - )?([\wäöüÄÖÜ]*) - ([a-zA-Zß\/\s]*)(?:\s*)([0-9,]*) € (:?[\d ]*) cm bei Größe (:?[\w\d\/]*) (:?[\d ]*) cm bei Größe (:?[\w\d\/]*) ([\w\s,äöüÄÖÜß-]*) ([\d\w,% ]*)')
@staticmethod
def create_lady_blouse_from_description(description):
match = ProductCreator.__lady_blouse_regex.match(description)
if not match:
raise ValueError(description)
product = Product()
product.image_name = ProductCreator._LadyBlouse.get_image_name_from_match(match)
product.add_term(ProductCreator._LadyBlouse.get_brand_from_match(match))
product.add_term(ProductCreator._LadyBlouse.get_cloth_type_from_match(match))
product.add_term(ProductCreator._LadyBlouse.get_price_from_match(match))
product.add_term(ProductCreator._LadyBlouse.get_collar_type_from_match(match))
for material in ProductCreator._LadyBlouse.get_materials_from_match(match):
product.add_term(material)
for colour in ProductCreator._LadyBlouse.get_colours_from_match(match):
product.add_term(colour)
return product
@staticmethod
def create_gentleman_trouser_from_description(description):
match = ProductCreator.__gentleman_trouser_regex.match(description)
if not match:
raise ValueError(description)
product = Product()
product.image_name = ProductCreator._GentlemanTrouser.get_image_name_from_match(match)
product.add_term(ProductCreator._GentlemanTrouser.get_brand_from_match(match))
product.add_term(ProductCreator._GentlemanTrouser.get_type_from_match(match))
product.add_term(ProductCreator._GentlemanTrouser.get_price_from_match(match))
for pocket_type in ProductCreator._GentlemanTrouser.get_pocket_types_from_match(match):
product.add_term(pocket_type)
for colour in ProductCreator._GentlemanTrouser.get_colours_from_match(match):
product.add_term(colour)
for material in ProductCreator._GentlemanTrouser.get_materials_from_match(match):
product.add_term(material)
return product
@staticmethod
def create_from_dictionary(attribute_dict, image_name=None):
product = Product()
if image_name is not None:
product.image_name = image_name
product.term = attribute_dict
return product
class _GentlemanTrouser(object):
@staticmethod
def get_image_name_from_url(url):
url_parts = url.split('/')
return url_parts[-1].strip()
@staticmethod
def get_image_name_from_match(match):
url = match.group(1)
#return ProductCreator._LadyBlouse.get_image_name_from_url(url)
return url.replace('/', '')
@staticmethod
def get_brand_from_match(match):
brand = match.group(2).strip()
return brand
@staticmethod
def get_type_from_match(match):
cloth_type = match.group(3).strip()
return cloth_type
@staticmethod
def get_price_from_match(match):
price = match.group(5).strip()
return int(price.replace(',', ''))
@staticmethod
def get_pocket_types_from_match(match):
pocket_types = match.group(10)
return [pocket_type.strip() for pocket_type in pocket_types.split(',')]
@staticmethod
def get_materials_from_match(match):
materials = match.group(11)
return ProductCreator._GentlemanTrouser.get_materials_from_str(materials)
@staticmethod
def get_materials_from_str(materials):
r = re.compile('([a-zA-Z]*)')
return [ m for m in r.findall(materials) if len(m) > 0]
@staticmethod
def get_colours_from_match(match):
colours = match.group(4)
return [colour.strip() for colour in colours.split('/')]
class _LadyBlouse(object):
@staticmethod
def get_image_name_from_url(url):
url_parts = url.split('/')
return url_parts[-1]
@staticmethod
def get_materials_from_str(materials):
r = re.compile('([a-zA-Z]*)')
return [ m for m in r.findall(materials) if len(m) > 0]
@staticmethod
def get_image_name_from_match(match):
url = match.group(1)
#return ProductCreator._LadyBlouse.get_image_name_from_url(url)
return url.replace('/', '')
@staticmethod
def get_brand_from_match(match):
if not match.group(2):
return match.group(4)
else:
return match.group(2)
@staticmethod
def get_cloth_type_from_match(match):
if not match.group(3):
return match.group(5)
else:
return match.group(3)
@staticmethod
def get_colours_from_match(match):
colours = match.group(6)
return [colour.strip() for colour in colours.split('/')]
@staticmethod
def get_price_from_match(match):
"""
returns the price in eurocents as int
"""
return int(match.group(7).replace(',', ''))
@staticmethod
def get_collar_type_from_match(match):
return match.group(8)
@staticmethod
def get_materials_from_match(match):
"""
returns all materials (without any percent information
"""
materials = match.group(9)
return ProductCreator._LadyBlouse.get_materials_from_str(materials)
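# Minimal sketch of the material parsing shared by both inner classes. The
# input below is hypothetical but follows the "NN% Name, NN% Name" shape the
# regexes above capture; percentages are dropped and only the names survive:
#
#     >>> ProductCreator._GentlemanTrouser.get_materials_from_str(
#     ...     "98% Baumwolle, 2% Elasthan")
#     ['Baumwolle', 'Elasthan']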
| {
"content_hash": "58cbc3a4e8c65ad2412a7658fe2c4b09",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 359,
"avg_line_length": 35.27777777777778,
"alnum_prop": 0.5785826771653543,
"repo_name": "dustywind/bachelor-thesis",
"id": "f4f580ede5eb74cb48def7113f7605f0f883b8b2",
"size": "6374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "impl/recommender/product/productcreator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2554"
},
{
"name": "HTML",
"bytes": "4363"
},
{
"name": "JavaScript",
"bytes": "26943"
},
{
"name": "Makefile",
"bytes": "10588"
},
{
"name": "Python",
"bytes": "109888"
},
{
"name": "Shell",
"bytes": "3012"
},
{
"name": "TeX",
"bytes": "163624"
}
],
"symlink_target": ""
} |
"""
binder.models.user
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by Ananth Bhaskararaman
:license: MIT, see LICENSE for more details
"""
from ..database import db
class User(db.Model):
UUID = db.Column(db.CHAR(36), primary_key=True)
Name = db.Column(db.String(50))
Email = db.Column(db.String(100))
IsActive = db.Column(db.Boolean)
def __init__(self, u_id, name, email, is_active):
self.UUID = u_id
self.Name = name
self.Email = email
self.IsActive = is_active
def is_authenticated(self):
return True
def is_active(self):
return self.IsActive
def is_anonymous(self):
# Real, logged-in users are never anonymous.
return False
def get_id(self):
return self.UUID
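# Sketch of how a Flask-Login style integration would consume this interface
# (hypothetical wiring; assumes a flask_login.LoginManager instance named
# login_manager is created during app setup):
#
#     @login_manager.user_loader
#     def load_user(user_id):
#         return User.query.get(user_id)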
| {
"content_hash": "0023be6b38038939611b0cc70cbcf79d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 53,
"avg_line_length": 21.676470588235293,
"alnum_prop": 0.587516960651289,
"repo_name": "ananthb/binder",
"id": "da21f24fa31c26b89017dd5a6eebcd8828f06134",
"size": "737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "binder/models/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2740"
},
{
"name": "HTML",
"bytes": "4602"
},
{
"name": "Python",
"bytes": "13290"
}
],
"symlink_target": ""
} |
"""
Predict from snapshot file.
Usage:
run_predict.py snapshot
Where snapshot was saved by `run_train.py`. Model type and folders are as
defined in `config.py`.
"""
from argparse import ArgumentParser
import cPickle
import gzip
import logging
import os
import theano.tensor as T
from config import config
from sfddd import models
from sfddd.sgd import Predictor
from sfddd.submit import save_submission
from sfddd.util import timed
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def get_args():
p = ArgumentParser()
p.add_argument('snapshot', type=str,
help='snapshot file containing model weights')
return p.parse_args()
@timed
def main(args):
sample_submission = os.path.join(config.paths.data_folder,
'sample_submission.csv')
names = ['Xs', 'Ys', 'Xv', 'Yv', 'Xt', 'test_fnames']
data = {}
for name in names:
fn = name + '.pkl.gz'
with gzip.open(os.path.join(config.paths.cache_folder, fn)) as fi:
data[name] = cPickle.load(fi)
input_var = T.tensor4('inputs')
if config.model == 'vgg':
mdl = models.Vgg16(input_var)
elif config.model == 'inc':
mdl = models.IncV3(input_var)
else:
logger.error("Unrecognized model name: %s" % config.model)
raise ValueError(config.model)
predictor = Predictor(batch_size=2)
pred = predictor.predict(data['Xt'], mdl, snapshot=args.snapshot)
save_submission(pred, data['test_fnames'], sample_submission,
config.paths.out_folder)
if __name__ == '__main__':
main(get_args())
| {
"content_hash": "882c399ff624899dd2284d4e37b3f25c",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 74,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.6425531914893617,
"repo_name": "rmunoz12/sfddd",
"id": "8a15e6d4c74dbd6e14ee5eaff5730ac23e2e47c7",
"size": "1645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_predict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42842"
}
],
"symlink_target": ""
} |
from django_ical.views import ICalFeed
from .models import Event
class EventFeed(ICalFeed):
#product_id = '-//example.com//Example//EN'
timezone = 'UTC'
def items(self):
return Event.objects.all().order_by('-event_start')
def item_title(self, item):
return item.event_name
def item_description(self, item):
return ''
def item_start_datetime(self, item):
return item.setup_start
def item_end_datetime(self, item):
return item.teardown_end
def item_link(self, item):
return ''
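# Hypothetical URL wiring for this feed; a django-ical feed is mounted like
# any Django view (the pattern and module paths below are assumptions):
#
#     from django.conf.urls import url
#     from .feeds import EventFeed
#
#     urlpatterns = [
#         url(r'^events\.ics$', EventFeed()),
#     ]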
| {
"content_hash": "9f0d833abfc24a09b5c844c1a986fe69",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 59,
"avg_line_length": 21.692307692307693,
"alnum_prop": 0.6312056737588653,
"repo_name": "bable5/schdlr",
"id": "87b33472bd18780959d3b3b335db974b753b6b07",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/schedule/feeds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12692"
},
{
"name": "JavaScript",
"bytes": "179952"
},
{
"name": "PHP",
"bytes": "6147"
},
{
"name": "Puppet",
"bytes": "479"
},
{
"name": "Python",
"bytes": "11020"
},
{
"name": "Shell",
"bytes": "210"
}
],
"symlink_target": ""
} |
from importlib import import_module
# Django
from django.conf import settings
from django.db.models.signals import post_syncdb
# External
try:
from south.signals import post_migrate
South = True
except ImportError:
South = False
# User
import notifier
from notifier.models import Backend
from notifier import settings as notifier_settings
###############################################################################
# Code
###############################################################################
def create_backends(app=None, sender=None, **kwargs):
"""
Creates/Updates Backend objects based on NOTIFIER_BACKENDS settings.
All values except `enabled` are derived from the Backend class and
not suppossed to be modified by user. They will be over-written on restart.
"""
name = app or sender.name
if South and not name == 'notifier':
return
for klass in notifier_settings.BACKEND_CLASSES:
try:
backend = Backend.objects.get(name=klass.name)
except Backend.DoesNotExist:
backend = Backend()
backend.enabled = True
finally:
backend.display_name = klass.display_name
backend.name = klass.name
backend.description = klass.description
backend.klass = ('.'.join([klass.__module__, klass.__name__]))
backend.save()
def create_notifications(app=None, sender=None, **kwargs):
"""
Creates all the notifications specified in notifiers.py for all apps
in INSTALLED_APPS
"""
name = app or sender.name
if South and not name == 'notifier':
return
for installed_app in settings.INSTALLED_APPS:
try:
import_module(
installed_app + '.' + notifier_settings.AUTO_CREATE_MODULE_NAME
)
except ImportError:
pass
if South:
post_migrate.connect(
create_backends,
dispatch_uid="notifier.management.create_backends"
)
post_migrate.connect(
create_notifications,
dispatch_uid="notifier.management.create_notifications",
)
else:
post_syncdb.connect(
create_backends,
dispatch_uid="notifier.management.create_backends",
sender=notifier.models
)
post_syncdb.connect(
create_notifications,
dispatch_uid="notifier.management.create_notifications",
sender=notifier.models
)
| {
"content_hash": "70a4ba4a18bc40afb511d494362ad19a",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 27.56179775280899,
"alnum_prop": 0.6049735018344884,
"repo_name": "flc/django-notifier",
"id": "fb7a74e8a835573ff11f1e3b178c593daa1549e9",
"size": "2632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifier/management/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "51480"
}
],
"symlink_target": ""
} |
"""Generic Node base class for all workers that run on hosts."""
from oslo_config import cfg
from oslo_utils import importutils
from report.openstack.common import service
from report import wsgi
from report import rpc
from report import utils
from report import exception
from report import context
from report.i18n import _, _LE, _LI, _LW
from oslo_log import log as logging
import oslo_messaging as messaging
import os
import sys
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='Seconds between nodes reporting state to datastore'),
cfg.BoolOpt('periodic_enable',
default=True,
help='Enable periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='Range of seconds to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
cfg.ListOpt('enabled_ssl_apis',
default=[],
help='A list of APIs with enabled SSL'),
cfg.StrOpt('reportapi_listen',
default="0.0.0.0",
help='The IP address on which the OpenStack API will listen.'),
cfg.IntOpt('reportapi_listen_port',
default=8888,
help='The port on which the OpenStack API will listen.'),
cfg.IntOpt('reportapi_workers',
help='Number of workers for OpenStack API service. The default '
'will be the number of CPUs available.'),
cfg.StrOpt('cert_manager',
default='nova.cert.manager.CertManager',
help='Full class name for the Manager for cert'),
cfg.IntOpt('service_down_time',
default=60,
help='Maximum time since last check-in for up service'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
it state to the database services table.
"""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_interval=None, periodic_fuzzy_delay=None,
service_name=None, *args, **kwargs):
super(Service, self).__init__()
"""
if not rpc.initialized():
rpc.init(CONF)
"""
self.host = host
self.binary = binary
self.topic = topic
LOG.debug("Topic:" + self.topic + "; Host:" + self.host)
self.manager_class_name = "report.rpt.manager.RptManager"
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host,
service_name=service_name,
*args, **kwargs)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.basic_config_check()
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
#setup_profiler(binary, host)
def start(self):
"""
version_string = version.version_string()
LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
{'topic': self.topic, 'version_string': version_string})
self.model_disconnected = False
self.manager.init_host()
ctxt = context.get_admin_context()
try:
service_ref = db.service_get_by_args(ctxt,
self.host,
self.binary)
self.service_id = service_ref['id']
except exception.NotFound:
self._create_service_ref(ctxt)
LOG.debug("Creating RPC server for service %s", self.topic)
"""
target = messaging.Target(topic=self.topic, server=self.host)
endpoints = [self.manager]
endpoints.extend(self.manager.additional_endpoints)
#serializer = objects_base.CinderObjectSerializer()
self.rpcserver = rpc.get_server(target, endpoints)
self.rpcserver.start()
"""
self.manager.init_host_with_rpc()
if self.report_interval:
pulse = loopingcall.FixedIntervalLoopingCall(
self.report_state)
pulse.start(interval=self.report_interval,
initial_delay=self.report_interval)
self.timers.append(pulse)
if self.periodic_interval:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = loopingcall.FixedIntervalLoopingCall(
self.periodic_tasks)
periodic.start(interval=self.periodic_interval,
initial_delay=initial_delay)
self.timers.append(periodic)
"""
def basic_config_check(self):
"""Perform basic config checks before starting service."""
# Make sure the tempdir exists and is writable
try:
with utils.tempdir():
pass
except Exception as e:
LOG.error(_LE('Temporary directory is invalid: %s'), e)
sys.exit(1)
# Make sure report interval is less than service down time
if self.report_interval:
if CONF.service_down_time <= self.report_interval:
new_down_time = int(self.report_interval * 2.5)
LOG.warning(
_LW("Report interval must be less than service down "
"time. Current config service_down_time: "
"%(service_down_time)s, report_interval for this "
"service is: %(report_interval)s. Setting global "
"service_down_time to: %(new_down_time)s"),
{'service_down_time': CONF.service_down_time,
'report_interval': self.report_interval,
'new_down_time': new_down_time})
CONF.set_override('service_down_time', new_down_time)
def _create_service_ref(self, context):
"""
zone = CONF.storage_availability_zone
service_ref = db.service_create(context,
{'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
'availability_zone': zone})
self.service_id = service_ref['id']
"""
pass
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_interval=None,
periodic_fuzzy_delay=None, service_name=None):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'cinder-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_interval: defaults to CONF.periodic_interval
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(sys.argv[0])
if not topic:
topic = binary
if not manager:
"""
subtopic = binary.rpartition('cinder-')[2]
manager = CONF.get('%s_manager' % subtopic, None)
"""
manager = "report.rpt.manager.RptManager"
"""
if report_interval is None:
report_interval = CONF.report_interval
if periodic_interval is None:
periodic_interval = CONF.periodic_interval
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
"""
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_interval=periodic_interval,
periodic_fuzzy_delay=periodic_fuzzy_delay,
service_name=service_name)
return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
"""
try:
self.service_ref.destroy()
except exception.NotFound:
LOG.warning(_LW('Service killed that has no database entry'))
"""
def stop(self):
try:
self.rpcserver.stop()
self.rpcserver.wait()
except Exception:
pass
"""
try:
self.manager.cleanup_host()
except Exception:
LOG.exception(_LE('Service error occurred during cleanup_host'))
pass
"""
super(Service, self).stop()
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = (getattr(CONF, '%s_workers' % name, 1))
self.use_ssl = use_ssl
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl,
max_url_len=max_url_len)
# Pull back actual port used
self.port = self.server.port
self.backdoor_port = None
LOG.debug("WSGIServer init! name:%s, manager:%s, host:%s, port:%s" %
(self.name, str(self.manager), self.host, str(self.port)))
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def process_launcher():
return service.ProcessLauncher()
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError('serve() can only be called once')
_launcher = service.launch(server, workers=workers)
def wait():
_launcher.wait()
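# Hypothetical launch sequences using the helpers above (the binary, topic,
# and worker values are illustrative, not defined by this module):
#
#     RPC service:
#         server = Service.create(binary='report-rpt', topic='rpt')
#         serve(server)
#         wait()
#
#     WSGI API (the 'reportapi' prefix matches the reportapi_* options
#     registered at the top of this module):
#         api = WSGIService('reportapi')
#         serve(api, workers=api.workers)
#         wait()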
| {
"content_hash": "32c80e1fc5b0c470df57a3c63760ddea",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 80,
"avg_line_length": 36.04347826086956,
"alnum_prop": 0.5656664656212304,
"repo_name": "Aaron-DH/report",
"id": "d37b42baca37166bec1edbb98e243f235298d381",
"size": "14034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "report/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "431128"
}
],
"symlink_target": ""
} |
"""
Exception raised when a body request could be parsed normally, but it's contents doesn't satisfies the specified
values by the app
"""
from rinzler.exceptions import RinzlerHttpException
__author__ = ["Rinzler<github.com/feliphebueno>", "4ndr<github.com/4ndr>"]
class UnacceptableInputException(RinzlerHttpException):
"""
UnacceptableInputException
"""
status_code = 406
exception_name = "Not Acceptable"
| {
"content_hash": "ddc4685d4a6e68d1baca3eb7dd79433f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 112,
"avg_line_length": 28.8,
"alnum_prop": 0.7430555555555556,
"repo_name": "feliphebueno/Rinzler",
"id": "14f9d0cf04dbef200ce56fa71a24c212cf89155e",
"size": "432",
"binary": false,
"copies": "1",
"ref": "refs/heads/v2",
"path": "rinzler/exceptions/unacceptable_input_exception.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35437"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1beta1
def sample_list_model_deployment_monitoring_jobs():
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_model_deployment_monitoring_jobs(request=request)
# Handle the response
for response in page_result:
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_ListModelDeploymentMonitoringJobs_sync]
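# Note: the parent for this request takes the form
# "projects/{project}/locations/{location}"; a filled-in example
# (hypothetical project and region):
#
#     request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest(
#         parent="projects/my-project/locations/us-central1",
#     )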
| {
"content_hash": "944820a5a2decd4d9f4555e961c09efd",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 97,
"avg_line_length": 30.4,
"alnum_prop": 0.7483552631578947,
"repo_name": "googleapis/python-aiplatform",
"id": "9533d98066f693c78e878e36437c3dd76ba0ad69",
"size": "1666",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
from .r_base import r_base
# r modules
from .r_anova import r_anova
from .r_dataNormalization import r_dataNormalization
from .r_missingValues import r_missingValues
from .r_pca import r_pca
from .r_svd import r_svd
from .r_pls import r_pls
from .r_spls import r_spls
from .r_statistics import r_statistics
from .r_enrichment import r_enrichment
class r_interface(
r_anova,
r_dataNormalization,
r_missingValues,
r_pca,
r_pls,
r_spls,
r_statistics,
r_svd,
r_enrichment):
'''conveniency class that wraps all r class'''
pass
| {
"content_hash": "1ca9bd6f1ad1e533a605b50f438f7a41",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 52,
"avg_line_length": 25.125,
"alnum_prop": 0.6666666666666666,
"repo_name": "dmccloskey/r_statistics",
"id": "b52275e9ee1272d4a82dc142081de4f542f99f3c",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "r_statistics/r_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "265146"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.models import Image
from cms.models import CMSPlugin
class Product(models.Model):
title = models.CharField(_('title'), max_length=255)
description = models.TextField(_('description'))
price = models.FloatField(_('price'), default=0.0)
created = models.DateTimeField(_('created'), auto_now_add=True)
updated = models.DateTimeField(_('updated'), auto_now=True)
url = models.URLField(_('url'), default='http://yuelingshan.com/')
images = models.ManyToManyField(Image)
def __unicode__(self):
return self.title
class Meta:
verbose_name = _('Product')
verbose_name_plural = _('Products')
ordering = ['-updated']
class ProductPlugin(CMSPlugin):
title = models.CharField(
_('title'), max_length=255, default='Product Gallery Title')
description = models.TextField(
_('description'), default='Product Gallery Sub-Title.')
count = models.IntegerField(_('count'), default=4)
def __unicode__(self):
return '%s' % self.count
def get_products(self):
return Product.objects.all()[:self.count]
| {
"content_hash": "244fc1945671aade9649a1f149b5f5b0",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 70,
"avg_line_length": 33.388888888888886,
"alnum_prop": 0.6589018302828619,
"repo_name": "amaozhao/basecms",
"id": "e39f564b681b3dc492b934f78993d6bc07728b3d",
"size": "1202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "product/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "248555"
},
{
"name": "JavaScript",
"bytes": "616778"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "4218408"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
from datetime import datetime
from decimal import Decimal
from django.conf import settings
from django.contrib.contenttypes import generic
from django.db import models, transaction
from etc import cache
from etc.entities import EntityTypes, type_to_class, obj_to_type
from etc.func import crc32, salted_hash
import json
import logging
from org.models import Org
from sourcing.models import SourceTaggedItem
import sys
import urlparse
from users.models import User
#from utils.donations import nfg_api, mock_nfg_api
from utils.misc_helpers import send_admins_error_email
class Donor(models.Model):
id = models.AutoField(db_column='donor_id', primary_key=True)
user = models.ForeignKey(User, db_column='user_id', null=True)
first_name = models.CharField(max_length=30, db_column='first_name')
last_name = models.CharField(max_length=30, db_column='last_name')
email = models.EmailField(db_column='email', max_length=200, unique=True)
email_crc32 = models.IntegerField(db_column='email_crc32')
class Meta:
db_table = 'donors'
@property
def get_first_name(self):
return self.first_name
# For distinguishing in templates between Donors and Jumo users
@property
def is_jumo_user(self):
return self.user and self.user.is_active
def save(self, *args, **kw):
self.email = self.email.lower()
self.email_crc32 = crc32(self.email)
super(Donor, self).save(*args, **kw)
@property
def email_hash(self):
return salted_hash(self.email)
@classmethod
def get_or_create(cls, first_name, last_name, email, user=None):
email = email.lower()
d, created = Donor.objects.get_or_create(email=email, email_crc32=crc32(email))
state_changed = False
if user and user != d.user:
d.user = user
state_changed = True
else:
try:
# For those donating anonymously
# who are already registered (active) users
existing_user = User.objects.get(email=email, is_active=True)
d.user=existing_user
except User.DoesNotExist:
pass
if d.first_name != first_name:
d.first_name = first_name
state_changed = True
if d.last_name != last_name:
d.last_name = last_name
state_changed = True
if state_changed:
d.save()
return d
@classmethod
def from_email(cls, email):
try:
email = email.lower()
return Donor.objects.get(email=email, email_crc32=crc32(email))
except Donor.DoesNotExist:
return None
except Exception, err:
logging.exception("Error Retrieving Donor On Email")
return None
@classmethod
def from_user(cls, user):
try:
return Donor.objects.get(user=user)
except Donor.DoesNotExist:
return None
except Exception, err:
logging.exception("Error Retrieving Donor From User")
return None
@classmethod
def get_all_donors_for_entity(cls, entity):
return Donor.objects.raw("""
select do.*
from donations d
join donors do
using(donor_id)
where d.entity_type=%(entity_type)s
and d.entity_id=%(entity_id)s
""", {'entity_type': entity.type, 'entity_id': entity.id})
def is_subscribed_to(self, pub_id):
return len(self.subscriptions.filter(id=pub_id, subscription__subscribed=True).values_list('id')) > 0
class DonorPhone(models.Model):
id = models.AutoField(db_column='donor_phone_id', primary_key=True)
donor = models.ForeignKey(Donor, db_column='donor_id')
phone = models.CharField(max_length=50)
class Meta:
db_table='donor_phone_numbers'
class DonorAddress(models.Model):
id = models.AutoField(db_column='donor_address_id', primary_key=True)
donor = models.ForeignKey(Donor, db_column='donor_id', related_name='addresses')
street1 = models.CharField(max_length=255, db_column='street1')
street2 = models.CharField(max_length=255, db_column='street2', blank=True)
city = models.CharField(max_length=255, db_column='city')
region = models.CharField(max_length=255, db_column='region', blank=True, default="")
postal_code = models.CharField(max_length=14, db_column='postal_code')
country = models.CharField(max_length=2, db_column='country', blank=True)
is_billing = models.BooleanField(default=True, db_column='is_billing')
is_shipping = models.BooleanField(default=True, db_column='is_shipping')
class Meta:
db_table = 'donor_addresses'
@classmethod
def get_or_create(cls, **kwargs):
"""
EX: dict(donor=donor, street1='000 FakeTown', street2='', city='FakeyVille',
region='NY', postal_code=10012, country="United States of America")
"""
da, created = DonorAddress.objects.get_or_create(donor=kwargs["donor"],
street1=kwargs["street1"],
street2=kwargs.get("street2"),
city=kwargs["city"],
region=kwargs["region"],
postal_code=kwargs["postal_code"],
country=kwargs["country"])
return da
class CreditCard(models.Model):
id = models.AutoField(db_column='credit_card_id', primary_key=True)
donor = models.ForeignKey(Donor, db_column='donor_id', related_name='credit_cards')
donor_address = models.ForeignKey(DonorAddress, db_column='donor_address_id')
date_last_charged = models.DateTimeField(db_column='date_last_charged')
status = models.CharField(max_length=50, db_column='status')
nfg_card_on_file_id = models.IntegerField(db_column='nfg_card_on_file_id')
nfg_cof_is_active = models.BooleanField(db_column='nfg_cof_is_active')
class Meta:
db_table = 'credit_cards'
class CardStatus:
"""TODO: use statuses to avoid rebilling people we know are failures"""
DECLINED = 'declined'
def disable_cof(self):
pass
class DonationProcessingFailed(Exception):
pass
class Donation(models.Model):
id = models.AutoField(db_column='donation_id', primary_key=True)
donor = models.ForeignKey(Donor, db_column='donor_id', related_name='donations')
credit_card = models.ForeignKey(CreditCard, db_column='credit_card_id')
entity_type = models.CharField(max_length=100, db_column='entity_type')
entity_id = models.IntegerField(db_column='entity_id')
amount = models.DecimalField(max_digits=19, decimal_places=2, db_column='amount')
jumo_amount = models.DecimalField(max_digits=19, decimal_places=2, db_column='jumo_amount')
street1 = models.CharField(max_length=255, db_column='street1')
street2 = models.CharField(max_length=255, db_column='street2', blank=True)
city = models.CharField(max_length=255, db_column='city')
region = models.CharField(max_length=255, blank=True, default="", db_column='region')
postal_code = models.CharField(max_length=14, db_column='postal_code')
country = models.CharField(max_length=2, db_column='country', blank=True)
phone = models.CharField(max_length=50, db_column='phone')
comment = models.CharField(max_length=2000, db_column='comment', blank=True)
charge_id = models.IntegerField(db_column='charge_id', null=True, default=None)
charge_status = models.CharField(max_length=100, db_column='charge_status')
payment_status = models.CharField(max_length=100, db_column='payment_status')
last_payment_attempt = models.DateTimeField(db_column='last_payment_attempt')
date = models.DateTimeField(auto_now_add=True, db_column='donation_date')
version = models.IntegerField(db_column='version', default=1)
list_publicly = models.BooleanField(db_column='list_publicly')
is_anonymous = models.BooleanField(db_column='is_anonymous')
_source_tagged_items = generic.GenericRelation(SourceTaggedItem,
content_type_field='item_type',
object_id_field='item_id')
class Meta:
db_table = 'donations'
class ChargeStatus:
DO_NOT_ATTEMPT = "do_not_attempt"
READY = "ready"
ATTEMPTING_PAYMENT = "attempting_payment"
PAYMENT_COMPLETE = "payment_complete"
class PaymentStatus:
UNPAID = "unpaid"
PAID = "paid"
FAILED = "failed"
PERMAFAILED = "permafailed"
@classmethod
def get(cls, id, force_db=False):
if force_db:
obj = cls.objects.get(id=id)
cache.bust(obj)
return obj
return cache.get(cls, id)
@classmethod
def multiget(cls, ids, force_db=False):
if force_db:
return Donation.objects.filter(id__in=ids)
return cache.get(cls, ids)
@classmethod
def get_retryable_donations(cls):
case_statement = ' '.join(["when %s then %s" % (idx+1,val) for idx, val in enumerate(settings.PAYMENT_RETRY_SCHEDULE) ])
return Donation.objects.raw("""
select
do.*,
do.last_payment_attempt + interval case count(p.donation_id) """ + case_statement + """ end day as min_retry_date
from donations do
join payments p
on p.donation_id = do.donation_id
and did_succeed=0
where do.payment_status = %(failed)s
and do.charge_status = %(ready)s
group by do.donation_id
having utc_timestamp() >= min_retry_date
""" , {'failed': cls.PaymentStatus.FAILED,
'ready': cls.ChargeStatus.READY
})
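# Illustrative expansion (hypothetical schedule): with
# settings.PAYMENT_RETRY_SCHEDULE = [1, 3, 7] the generated fragment is
# "when 1 then 1 when 2 then 3 when 3 then 7", so a donation with N failed
# payments becomes retryable N-th-entry days after last_payment_attempt.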
@classmethod
def get_donations_for_entity(cls, entity):
return Donation.objects.filter(entity_type=entity.type, entity_id=entity.id)
@classmethod
def get_processable_donations_for_entity(cls, entity):
return Donation.objects.filter(entity_type=entity.type,
entity_id=entity.id,
charge_status=cls.ChargeStatus.READY,
payment_status=cls.PaymentStatus.UNPAID)
@property
def get_source_tags(self):
# @todo: not optimized
return [item.tag for item in self._source_tagged_items.all()]
@property
def entity(self):
return cache.get(type_to_class(self.entity_type), self.entity_id)
@property
def get_beneficiaries(self):
return DonationBeneficiary.objects.filter(donation=self)
def mark_attempting(self):
# Optimistic locking
affected = Donation.objects.filter(id=self.id, version=self.version).update(charge_status=self.ChargeStatus.ATTEMPTING_PAYMENT, version=self.version+1)
if not affected:
msg = "Tried optimistic lock of donation row with version %d, but a higher version existed. This is probably BAD. Unless somebody did an ad hoc update, there may be more than 1 instance of bill_campaignd running." % self.version
send_admins_error_email("DONATION ERROR", msg, sys.exc_info())
raise DonationProcessingFailed, msg
def process(self):
pass
@transaction.commit_on_success()
def _execute_payment(self, payment_fails):
pass
@classmethod
def create_and_process(cls, **kwargs):
pass
@classmethod
@transaction.commit_on_success()
def _create_and_process(cls, **kwargs):
pass
@classmethod
def create_and_store(cls, **kwargs):
pass
@classmethod
@transaction.commit_on_success()
def _create(cls, store_cc=False, **kwargs):
pass
def _to_nfg_dict(self):
pass
@classmethod
def _cc_info_to_nfg_dict(cls, **kwargs):
pass
def add_source_tags(self, source_tags):
pass
def save(self, *args, **kwargs):
pass
class DonationBeneficiary(models.Model):
id = models.AutoField(db_column='donation_beneficiary_id', primary_key=True)
donation = models.ForeignKey(Donation, db_column='donation_id', related_name='beneficiaries')
org = models.ForeignKey(Org, db_column='org_id')
amount = models.DecimalField(max_digits=19, decimal_places=2, db_column='amount')
class Meta:
db_table = 'donation_beneficiaries'
unique_together = ('donation', 'org')
class Payment(models.Model):
id = models.AutoField(db_column='payment_id', primary_key=True)
status = models.CharField(max_length=255, db_column='status', blank=True)
donation = models.ForeignKey(Donation, db_column='donation_id')
did_succeed = models.BooleanField(db_column='did_succeed')
error_data = models.CharField(max_length=2000, db_column='error_data')
payment_date = models.DateTimeField(db_column='payment_date', default=datetime.utcnow)
class Meta:
db_table = 'payments'
| {
"content_hash": "e2c57851ec222d8c4d6b22cfaf45ef1f",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 240,
"avg_line_length": 37.54857142857143,
"alnum_prop": 0.6352153401308781,
"repo_name": "jumoconnect/openjumo",
"id": "92d92e47a83889fea0c3edc8785d6a09c0d03cf8",
"size": "13142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jumodjango/donation/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "5874"
},
{
"name": "JavaScript",
"bytes": "341559"
},
{
"name": "Python",
"bytes": "928137"
},
{
"name": "Shell",
"bytes": "871"
}
],
"symlink_target": ""
} |
"""Utility functions for parsing and building Ethernet packet/contents."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
from ryu.lib import mac
from ryu.lib.packet import arp, ethernet, icmp, icmpv6, ipv4, ipv6, packet, vlan
from ryu.ofproto import ether
from ryu.ofproto import inet
from valve_util import btos
def mac_addr_is_unicast(mac_addr):
"""Returns True if mac_addr is a unicast Ethernet address.
Args:
mac_addr (str): MAC address.
Returns:
bool: True if a unicast Ethernet address.
"""
msb = mac_addr.split(':')[0]
return msb[-1] in '02468aAcCeE'
def parse_pkt(pkt):
"""Return parsed Ethernet packet.
Args:
pkt (ryu.lib.packet.packet): packet received from dataplane.
Returns:
ryu.lib.packet.ethernet: Ethernet packet.
"""
return pkt.get_protocol(ethernet.ethernet)
def build_pkt_header(eth_src, eth_dst, vid, dl_type):
"""Return an Ethernet packet header.
Args:
eth_src (str): source Ethernet MAC address.
eth_dst (str): destination Ethernet MAC address.
vid (int or None): VLAN VID to use (or None)
dl_type (int): EtherType.
Returns:
ryu.lib.packet.ethernet: Ethernet packet with header.
"""
pkt_header = packet.Packet()
if vid is None:
eth_header = ethernet.ethernet(
eth_dst, eth_src, dl_type)
pkt_header.add_protocol(eth_header)
else:
eth_header = ethernet.ethernet(
eth_dst, eth_src, ether.ETH_TYPE_8021Q)
pkt_header.add_protocol(eth_header)
vlan_header = vlan.vlan(vid=vid, ethertype=dl_type)
pkt_header.add_protocol(vlan_header)
return pkt_header
def arp_request(eth_src, vid, src_ip, dst_ip):
"""Return an ARP request packet.
Args:
eth_src (str): Ethernet source address.
vid (int or None): VLAN VID to use (or None).
src_ip (ipaddress.IPv4Address): source IPv4 address.
dst_ip (ipaddress.IPv4Address): requested IPv4 address.
Returns:
ryu.lib.packet.arp: serialized ARP request packet.
"""
pkt = build_pkt_header(eth_src, mac.BROADCAST_STR, vid, ether.ETH_TYPE_ARP)
arp_pkt = arp.arp(
opcode=arp.ARP_REQUEST, src_mac=eth_src,
src_ip=str(src_ip), dst_mac=mac.DONTCARE_STR, dst_ip=str(dst_ip))
pkt.add_protocol(arp_pkt)
pkt.serialize()
return pkt
def arp_reply(eth_src, eth_dst, vid, src_ip, dst_ip):
"""Return an ARP reply packet.
Args:
eth_src (str): Ethernet source address.
eth_dst (str): destination Ethernet MAC address.
vid (int or None): VLAN VID to use (or None).
src_ip (ipaddress.IPv4Address): source IPv4 address.
dst_ip (ipaddress.IPv4Address): destination IPv4 address.
Returns:
ryu.lib.packet.arp: serialized ARP reply packet.
"""
pkt = build_pkt_header(eth_src, eth_dst, vid, ether.ETH_TYPE_ARP)
arp_pkt = arp.arp(
opcode=arp.ARP_REPLY, src_mac=eth_src,
src_ip=src_ip, dst_mac=eth_dst, dst_ip=dst_ip)
pkt.add_protocol(arp_pkt)
pkt.serialize()
return pkt
def echo_reply(eth_src, eth_dst, vid, src_ip, dst_ip, data):
"""Return an ICMP echo reply packet.
Args:
eth_src (str): Ethernet source address.
eth_dst (str): destination Ethernet MAC address.
vid (int or None): VLAN VID to use (or None).
src_ip (ipaddress.IPv4Address): source IPv4 address.
dst_ip (ipaddress.IPv4Address): destination IPv4 address.
Returns:
ryu.lib.packet.icmp: serialized ICMP echo reply packet.
"""
pkt = build_pkt_header(eth_src, eth_dst, vid, ether.ETH_TYPE_IP)
ipv4_pkt = ipv4.ipv4(
dst=dst_ip, src=src_ip, proto=inet.IPPROTO_ICMP)
pkt.add_protocol(ipv4_pkt)
icmp_pkt = icmp.icmp(
type_=icmp.ICMP_ECHO_REPLY, code=icmp.ICMP_ECHO_REPLY_CODE,
data=data)
pkt.add_protocol(icmp_pkt)
pkt.serialize()
return pkt
def ipv6_link_eth_mcast(dst_ip):
"""Return an Ethernet multicast address from an IPv6 address.
See RFC 2464 section 7.
Args:
dst_ip (ipaddress.IPv6Address): IPv6 address.
Returns:
str: Ethernet multicast address.
"""
mcast_mac_bytes = b'\x33\x33' + dst_ip.packed[-4:]
mcast_mac = ':'.join(['%02X' % ord(x) for x in mcast_mac_bytes])
return mcast_mac
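# Worked example: ff02::1:ff00:42 maps to 33:33:FF:00:00:42 (the fixed 33:33
# prefix followed by the low-order 32 bits of the IPv6 address).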
def ipv6_solicited_node_from_ucast(ucast):
"""Return IPv6 solicited node multicast address from IPv6 unicast address.
See RFC 3513 section 2.7.1.
Args:
ucast (ipaddress.IPv6Address): IPv6 unicast address.
Returns:
ipaddress.IPv6Address: IPv6 solicited node multicast address.
"""
link_mcast_prefix = ipaddress.ip_interface(btos('ff02::1:ff00:0/104'))
mcast_bytes = link_mcast_prefix.packed[:13] + ucast.packed[-3:]
link_mcast = ipaddress.IPv6Address(mcast_bytes)
return link_mcast
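# Worked example: 2001:db8::1234:5678 maps to ff02::1:ff34:5678 (the last
# 24 bits of the unicast address fill the low bits of ff02::1:ff00:0/104).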
def nd_request(eth_src, vid, src_ip, dst_ip):
"""Return IPv6 neighbor discovery request packet.
Args:
eth_src (str): source Ethernet MAC address.
vid (int or None): VLAN VID to use (or None).
src_ip (ipaddress.IPv6Address): source IPv6 address.
dst_ip (ipaddress.IPv6Address): requested IPv6 address.
Returns:
ryu.lib.packet.ethernet: Serialized IPv6 neighbor discovery packet.
"""
nd_mac = ipv6_link_eth_mcast(dst_ip)
ip_gw_mcast = ipv6_solicited_node_from_ucast(dst_ip)
pkt = build_pkt_header(eth_src, nd_mac, vid, ether.ETH_TYPE_IPV6)
ipv6_pkt = ipv6.ipv6(
src=str(src_ip), dst=ip_gw_mcast, nxt=inet.IPPROTO_ICMPV6)
pkt.add_protocol(ipv6_pkt)
icmpv6_pkt = icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_SOLICIT,
data=icmpv6.nd_neighbor(
dst=dst_ip,
option=icmpv6.nd_option_sla(hw_src=eth_src)))
pkt.add_protocol(icmpv6_pkt)
pkt.serialize()
return pkt
def nd_reply(eth_src, eth_dst, vid, src_ip, dst_ip, hop_limit):
"""Return IPv6 neighbor discovery reply packet.
Args:
eth_src (str): source Ethernet MAC address.
eth_dst (str): destination Ethernet MAC address.
vid (int or None): VLAN VID to use (or None).
src_ip (ipaddress.IPv6Address): source IPv6 address.
dst_ip (ipaddress.IPv6Address): destination IPv6 address.
hop_limit (int): IPv6 hop limit.
Returns:
ryu.lib.packet.ethernet: Serialized IPv6 neighbor discovery packet.
"""
pkt = build_pkt_header(
eth_src, eth_dst, vid, ether.ETH_TYPE_IPV6)
ipv6_reply = ipv6.ipv6(
src=src_ip,
dst=dst_ip,
nxt=inet.IPPROTO_ICMPV6,
hop_limit=hop_limit)
pkt.add_protocol(ipv6_reply)
icmpv6_reply = icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_ADVERT,
data=icmpv6.nd_neighbor(
dst=src_ip,
option=icmpv6.nd_option_tla(hw_src=eth_src), res=7))
pkt.add_protocol(icmpv6_reply)
pkt.serialize()
return pkt
def icmpv6_echo_reply(eth_src, eth_dst, vid, src_ip, dst_ip, hop_limit,
id_, seq, data):
"""Return IPv6 ICMP echo reply packet.
Args:
eth_src (str): source Ethernet MAC address.
eth_dst (str): destination Ethernet MAC address.
vid (int or None): VLAN VID to use (or None).
src_ip (ipaddress.IPv6Address): source IPv6 address.
dst_ip (ipaddress.IPv6Address): destination IPv6 address.
hop_limit (int): IPv6 hop limit.
id_ (int): identifier for echo reply.
seq (int): sequence number for echo reply.
data (str): payload for echo reply.
Returns:
ryu.lib.packet.ethernet: Serialized IPv6 ICMP echo reply packet.
"""
pkt = build_pkt_header(
eth_src, eth_dst, vid, ether.ETH_TYPE_IPV6)
ipv6_reply = ipv6.ipv6(
src=src_ip,
dst=dst_ip,
nxt=inet.IPPROTO_ICMPV6,
hop_limit=hop_limit)
pkt.add_protocol(ipv6_reply)
icmpv6_reply = icmpv6.icmpv6(
type_=icmpv6.ICMPV6_ECHO_REPLY,
data=icmpv6.echo(id_=id_, seq=seq, data=data))
pkt.add_protocol(icmpv6_reply)
pkt.serialize()
return pkt
| {
"content_hash": "9eae9f6d931ab0403071176021cae148",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 80,
"avg_line_length": 34.00763358778626,
"alnum_prop": 0.6482603815937149,
"repo_name": "isomer/faucet",
"id": "ccf5ec54e37ab697462615c587c075fbe78b2ad0",
"size": "8910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faucet/valve_packet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1621"
},
{
"name": "Python",
"bytes": "418877"
},
{
"name": "Shell",
"bytes": "702"
}
],
"symlink_target": ""
} |
from asyncio import CancelledError
import vedro.core as core
import vedro.plugins.artifacted as artifacted
import vedro.plugins.assert_rewriter as assert_rewriter
import vedro.plugins.deferrer as deferrer
import vedro.plugins.director as director
import vedro.plugins.interrupter as interrupter
import vedro.plugins.orderer as orderer
import vedro.plugins.repeater as repeater
import vedro.plugins.rerunner as rerunner
import vedro.plugins.seeder as seeder
import vedro.plugins.skipper as skipper
import vedro.plugins.slicer as slicer
import vedro.plugins.tagger as tagger
import vedro.plugins.terminator as terminator
from vedro.core import (
Dispatcher,
Factory,
MonotonicScenarioRunner,
MonotonicScenarioScheduler,
MultiScenarioDiscoverer,
ScenarioDiscoverer,
ScenarioFileFinder,
ScenarioFileLoader,
ScenarioFinder,
ScenarioLoader,
ScenarioOrderer,
ScenarioRunner,
ScenarioScheduler,
Singleton,
)
from vedro.core._scenario_finder._file_filters import (
AnyFilter,
DunderFilter,
ExtFilter,
HiddenFilter,
)
from vedro.core.scenario_orderer import PlainScenarioOrderer
__all__ = ("Config",)
class Config(core.Config):
class Registry(core.Config.Registry):
Dispatcher = Singleton[Dispatcher](Dispatcher)
ScenarioFinder = Factory[ScenarioFinder](lambda: ScenarioFileFinder(
file_filter=AnyFilter([HiddenFilter(), DunderFilter(), ExtFilter(only=["py"])]),
dir_filter=AnyFilter([HiddenFilter(), DunderFilter()])
))
ScenarioLoader = Factory[ScenarioLoader](ScenarioFileLoader)
ScenarioOrderer = Factory[ScenarioOrderer](PlainScenarioOrderer)
ScenarioDiscoverer = Factory[ScenarioDiscoverer](lambda: MultiScenarioDiscoverer(
finder=Config.Registry.ScenarioFinder(),
loader=Config.Registry.ScenarioLoader(),
orderer=Config.Registry.ScenarioOrderer(),
))
ScenarioScheduler = Factory[ScenarioScheduler](MonotonicScenarioScheduler)
ScenarioRunner = Factory[ScenarioRunner](lambda: MonotonicScenarioRunner(
dispatcher=Config.Registry.Dispatcher(),
interrupt_exceptions=(KeyboardInterrupt, SystemExit, CancelledError),
))
class Plugins(core.Config.Plugins):
class Director(director.Director):
enabled = True
class RichReporter(director.RichReporter):
enabled = True
class SilentReporter(director.SilentReporter):
enabled = True
class PyCharmReporter(director.PyCharmReporter):
enabled = True
class Orderer(orderer.Orderer):
enabled = True
class Deferrer(deferrer.Deferrer):
enabled = True
class Artifacted(artifacted.Artifacted):
enabled = True
class Interrupter(interrupter.Interrupter):
enabled = True
class Seeder(seeder.Seeder):
enabled = True
class Skipper(skipper.Skipper):
enabled = True
class Slicer(slicer.Slicer):
enabled = True
class Tagger(tagger.Tagger):
enabled = True
class Repeater(repeater.Repeater):
enabled = True
class Rerunner(rerunner.Rerunner):
enabled = True
class AssertRewriter(assert_rewriter.AssertRewriter):
enabled = True
class Terminator(terminator.Terminator):
enabled = True
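# Sketch of a project-level override (vedro conventionally loads a
# vedro.cfg.py from the project root; treat the file name and the exact
# pattern as assumptions mirroring the defaults above):
#
#     import vedro
#
#     class Config(vedro.Config):
#         class Plugins(vedro.Config.Plugins):
#             class Repeater(vedro.Config.Plugins.Repeater):
#                 enabled = False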
| {
"content_hash": "fc444e3ad922b6c4fc36e42e9730d099",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 92,
"avg_line_length": 29.550847457627118,
"alnum_prop": 0.6891310582162318,
"repo_name": "nikitanovosibirsk/vedro",
"id": "f1182c9bf61b094caa4f49f93c4a2c22430a7870",
"size": "3487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vedro/_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1384"
},
{
"name": "Python",
"bytes": "416588"
}
],
"symlink_target": ""
} |
import csv
import os
import pymongo
fips_code = '013'
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
cases = db.cases.find({
'FIPSCode': fips_code,
'date_collected': {'$exists': True}
}, projection = {
'_id': False,
'error': False,
'date_collected': False,
'Hearings': False,
'Services': False,
})
filename = 'district_court_' + fips_code + '.csv'
with open(filename, 'w') as f:
fieldnames = [
'FIPSCode', 'CaseNumber', 'Locality', 'CourtName', 'FiledDate',
'Name', 'AKA1', 'AKA2', 'DOB', 'Gender', 'Race', 'Address',
'OffenseDate', 'ArrestDate', 'Class', 'Status', 'Complainant',
'CaseType', 'Charge', 'CodeSection',
'AmendedCaseType', 'AmendedCharge', 'AmendedCode',
'FinalDisposition', 'DefenseAttorney',
'SentenceTime', 'SentenceSuspendedTime',
'ProbationType', 'ProbationTime', 'ProbationStarts',
'Fine', 'Costs', 'FineCostsDue', 'FineCostsPaid', 'FineCostsPaidDate',
'OperatorLicenseRestrictionCodes', 'OperatorLicenseSuspensionTime',
'RestrictionStartDate', 'RestrictionEndDate', 'VASAP'
]
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(cases)
| {
"content_hash": "6db232264db2177b251808ebe1b21a90",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 35.054054054054056,
"alnum_prop": 0.6445643793369313,
"repo_name": "bschoenfeld/va-court-scraper",
"id": "f2ee072775d45b2e099a5f4d3b680d7f77602ef3",
"size": "1297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archived/district_court_cases_to_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "43814"
},
{
"name": "Python",
"bytes": "189578"
}
],
"symlink_target": ""
} |
import os
import time
import socket
import random
import threading
from libfuzz.proxy import UdpProxy, TcpProxy
from libfuzz.fuzzer import Fuzzer
# TODO
# 1. Session per saved packet stream
# 2. Use MODE based separation of functionality
# 3. Input module on a separate thread for constant generation in
# 4. Separate modes to separate classes?
# 5. Generation/sniffing/saving with the scapy module?
# 6. Forward_port in different modes?
# 7. Input generation on MODE_GENERATE on separate thread?
# So as to send data continuously or multiple scheduled functions?
# 8. Standardized interface between modules. Input module expects to write to file etc.
# 9. NetworkFuzzer uses (has to use) static instances i.e. 5 sec or until service dies
# if packet capture is also desired.
# 10. Packet capture should only be done on packets that cause something.
# No unnecessary I/O and packets or payloads are tied to Process objects.
# 11. Scapy input module, replay pcap file
# 12. -T or something for specifiable interval for scheduled functions
class NetworkFuzzer(Fuzzer):
"""
    Network fuzzer that can work either as a transparent proxy or
    additionally as a process manager, starting services and their input
    automatically. All data that needs to be handled should go through
    the proxy, which will then pass it along channels to the recipient.
    The packets that pass through the proxy will be saved individually
    into a packet backlog per session.
    When input is self-generated, the proxy should use the default port.
    Input modules using sockets send all traffic to the default
    proxy port.
MODE_TRANSPARENT:
Transparent mode only initializes proxy and data handling
modules (preprocessing and fuzzing). By default the proxy
will listen on port 6000 and forward everything to the
specified port.
MODE_GENERATE:
Generator mode will use the input module to generate input
that is then forwarded to the recipient.
MODE_SERVICE:
Service mode is MODE_TRANSPARENT + one managed service to
which the data is forwarded through the proxy. The managed
service is handled by the ProcessManagement object and can
only handle one service at a time in this mode.
MODE_CONTROLLED:
        Controlled mode is the combination of MODE_SERVICE and
        MODE_GENERATE, with the added benefit that by using randomized
        ports (%p) it is possible to run dozens or hundreds of instances
        concurrently with the ProcessManagement object.
"""
MODE_TRANSPARENT = False
MODE_GENERATE = False
MODE_SERVICE = False
MODE_CONTROLLED = False
def __init__(self, opts):
"""
:opts:
{
pcap: (boolean) Whether to save packets in pcap format,
udp: (boolean) Whether to use UDP proxy (default: TCP),
program: (list) Program and arguments,
jobs: (int) Number of jobs to run concurrently,
timeout: (int) Seconds to give process before killing it,
output_directory: (string) Directory to place found bugs,
input_arguments: (string) Arguments for the input module,
input_module: (string) Module to use for input generation,
fuzzing_module: (string) Module to use for fuzzing the network data,
                preprocessing_module: (string) Module to use for preprocessing data,
instrumentation_module: (string) Module to use for instrumentation
}
"""
for opt, value in opts.items():
setattr(self, opt, value)
Fuzzer.__init__(self, opts)
self._tlock = threading.Lock()
self._set_operating_mode()
self._validate_arguments()
if self.udp:
self.proxy = UdpProxy(self.log)
else:
self.proxy = TcpProxy(self.log)
self.proxy.address = self.address
self.proxy.listen_port = self.listen_port
self.proxy.forward_port = self.forward_port
self.proxy.data_handler = self.data_handler
def _validate_arguments(self):
"""
Validate command line arguments.
"""
if not self.forward_port and not self.MODE_CONTROLLED:
self.log.error("No forward port specified.")
self._exit()
elif not self.MODE_CONTROLLED:
self.forward_port = int(self.forward_port)
if not self.listen_port:
self.listen_port = 6000
else:
self.listen_port = int(self.listen_port)
if not self.address:
self.address = "localhost"
if self.MODE_CONTROLLED or self.MODE_SERVICE:
if not self.jobs:
self.jobs = 1
def _exit(self):
self.proxy.running = False
        # A managed process only exists in service/controlled modes
        if not (self.MODE_TRANSPARENT or self.MODE_GENERATE):
            self.process_management.running = False
def _set_operating_mode(self):
"""
Sets operating mode according to given arguments.
"""
if (self.input_module or self.input_arguments) and self.program:
self.log.info("Running on controlled mode")
self.MODE_CONTROLLED = True
elif self.input_module or self.input_arguments:
self.log.info("Running on input generation mode")
self.MODE_GENERATE = True
elif self.program:
self.log.info("Running on service mode")
self.MODE_SERVICE = True
else:
self.log.info("Running on transparent mode")
self.MODE_TRANSPARENT = True
def _new_process_handler(self, process):
"""
Creates new Process instance that ProcessManagement
can monitor.
In controlled mode NetworkFuzzer will use the
post_start_handler to start each input generator
for each process
"""
self.log.debug("new_process_handler called")
if not self.timeout:
self.timeout = 0
process.time_left = int(self.timeout)
if process.time_left == 0:
process.timeout = False
# For saving network session to file
# file_type = "raw"
# if self.pcap:
# file_type = "pcap"
# process.session = "/tmp/mal-%d.%s" % (process.index, file_type)
# Instead of session files, use packet_queue list
# to log all packets, and if necessary, write them to file.
process.packet_queue = []
# Redirect stderr to per process log file
process.stderr.close()
process.stderr_file = "/tmp/mal-%d.stderr" % process.index
process.stderr = open(process.stderr_file, "w")
# NetworkFuzzer will assume "%p" to
# be listening port for the service and use
# bind("", 0) to bind to any free port.
if "%p" in self.program[0]:
process.port = self._random_port()
with self._tlock:
self.log.debug("Starting new service on port %d" % process.port)
self.proxy.service_ports.insert(0, process.port)
process.program = self.program[0].replace("%p", str(process.port))
process.program = process.program.split()
else:
process.port = self.forward_port
process.program = self.program[0].split()
# On controlled mode, we are responsible
# for the input generation
if self.MODE_CONTROLLED:
process.post_start_handler = self._post_start_handler
return process
def _random_port(self):
return random.randint(10000, 65000)
def _post_start_handler(self, process):
"""
Wait until the service has stopped initializing
itself, and then start the input generator.
"""
self.log.debug("post_start_handler() called")
if "%p" in self.input_arguments:
self.input_arguments = self.input_arguments.replace("%p", str(self.proxy.listen_port))
self.log.debug("Starting input generator")
self.log.debug(self.input_arguments)
time.sleep(1)
self.inputer.generate(self.input_arguments)
def _end_process_handler(self, process):
self.log.debug("Ending process pid: %d" % process.pid)
try:
os.remove("/tmp/mal-%d.stderr" % process.index)
self.log.debug("Removed /tmp/mal-%d.stderr" % process.index)
        except OSError:
            self.log.error("Could not remove /tmp/mal-%d.stderr" % process.index)
def _test_connection(self, port):
"""
Tries connecting to the service to determine
whether it is ready or not.
Returns true if the connection was established.
"""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as test_socket:
            try:
                test_socket.connect((self.proxy.address, port))
                return True
            except socket.error:
                return False
def _save_packet(self, data, index):
if self.pcap:
self._packet_to_pcap(data, "/tmp/mal-%d.pcap" % index)
else:
self._packet_to_raw(data, "/tmp/mal-%s.raw" % index)
def _packet_to_pcap(self, packet, pcap_file):
pass
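    # A minimal sketch of what the pcap writer above could look like,
    # assuming raw link-layer payloads and one-second timestamp
    # resolution (the struct layouts follow the classic libpcap file
    # format; this helper is illustrative and not part of the original
    # module):
    def _packet_to_pcap_sketch(self, packet, pcap_file):
        import struct
        new_file = not os.path.exists(pcap_file)
        with open(pcap_file, "ab") as f:
            if new_file:
                # Global header: magic, version 2.4, tz offset, sigfigs,
                # snaplen 65535, LINKTYPE_RAW (101)
                f.write(struct.pack("<IHHiIII",
                                    0xa1b2c3d4, 2, 4, 0, 0, 65535, 101))
            ts = int(time.time())
            # Per-record header: ts_sec, ts_usec, incl_len, orig_len
            f.write(struct.pack("<IIII", ts, 0, len(packet), len(packet)))
            f.write(packet)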
def _packet_to_raw(self, packet, raw_file):
with open(raw_file, "ab") as f:
f.write(b"===========================")
f.write(packet)
def _data_handler(self, index, data, processed, malformed):
# self._save_packet(data)
# self._save_packet(malformed_data, index) # Temporary comment
return malformed
def input_generator(self):
"""
This function is only called when input is
specifically generated in self.MODE_GENERATE.
It calls the input module to generate input
and if input is returned, forwards it to the
remote host. Otherwise, it is expected that
the input module creates the necessary input
on its own.
"""
data = self.inputer.generate(self.input_arguments)
if data:
self._send_data(self.data_handler(bytes(data), 0))
def _send_data(self, data):
"""
        This function simply forwards data to the remote host.
        It exists merely for expected behaviour with the default
        input module.
"""
address = (self.address, self.listen_port)
if self.udp:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
if self.udp:
while data:
sock.sendto(self.data_handler(data[:65535], 0), address)
data = data[65535:]
else:
sock.send(self.data_handler(data, 0))
sock.close()
def start(self):
self.proxy.start()
if self.MODE_GENERATE:
self.scheduled_functions.append(self.input_generator)
self.main_loop()
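# Hedged sketch of driving the fuzzer (all values hypothetical; the keys
# mirror the constructor docstring plus the proxy options accessed in
# _validate_arguments):
#
#   fuzzer = NetworkFuzzer({
#       'pcap': False, 'udp': False,
#       'program': ['./target_service -p %p'],
#       'jobs': 4, 'timeout': 10,
#       'output_directory': './bugs',
#       'input_arguments': '-p %p',
#       'input_module': 'default',
#       'fuzzing_module': 'default',
#       'preprocessing_module': 'default',
#       'instrumentation_module': 'default',
#       'address': 'localhost', 'listen_port': 6000, 'forward_port': None,
#   })
#   fuzzer.start()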
| {
"content_hash": "16a4a2842a30c313bdd7a72c84c90d32",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 98,
"avg_line_length": 35.60126582278481,
"alnum_prop": 0.6040888888888889,
"repo_name": "anhusa/pyfuzz",
"id": "03962a25ce72116b90f5625a816b342006df2467",
"size": "11251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libfuzz/network_fuzzer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "55278"
}
],
"symlink_target": ""
} |
from . import pos_order
| {
"content_hash": "89c0c65ad6b4a55b9fe57abfb38a8ad8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.75,
"repo_name": "it-projects-llc/pos-addons",
"id": "8e9c9c4c75c17230312b468b3c6a03218481fe15",
"size": "77",
"binary": false,
"copies": "1",
"ref": "refs/heads/13.0",
"path": "pos_qr_payments/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "131667"
},
{
"name": "Dockerfile",
"bytes": "330"
},
{
"name": "HTML",
"bytes": "240053"
},
{
"name": "JavaScript",
"bytes": "1277518"
},
{
"name": "Python",
"bytes": "362916"
}
],
"symlink_target": ""
} |
from .create_backup import CreateBackup
assert CreateBackup
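# The assert above is a common idiom for re-exporting CreateBackup from
# this package while silencing unused-import warnings from linters.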
| {
"content_hash": "735ee9b97e31076967897c3966ecb42a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 39,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.8524590163934426,
"repo_name": "rmyers/trove-dashboard",
"id": "f8f57b274dc9ec3dfee4593ae786d1ca1b7bf071",
"size": "61",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove_dashboard/database_backups/workflows/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "50932"
}
],
"symlink_target": ""
} |
from datetime import datetime
from lux.models import Schema, fields
from lux.ext.rest import RestRouter, route
from lux.ext.odm import Model
class TokenSchema(Schema):
user = fields.Nested('UserSchema')
class Meta:
model = 'tokens'
class TokenCreateSchema(TokenSchema):
"""Create a new Authorization ``Token`` for the authenticated ``User``.
"""
description = fields.String(required=True, minLength=2, maxLength=256)
class TokenModel(Model):
def get_one(self, session, *filters, **kwargs):
query = self.query(session, *filters, **kwargs)
token = query.one()
query.update({'last_access': datetime.utcnow()},
synchronize_session=False)
return token
class TokenCRUD(RestRouter):
"""
---
summary: Mange user tokens
tags:
- user
- token
"""
model = TokenModel('tokens', TokenSchema)
@route(default_response_schema=[TokenSchema])
def get(self, request):
"""
---
summary: List tokens for a user
responses:
200:
description: List all user tokens matching query filters
"""
return self.model.get_list_response(request)
@route(default_response=201,
default_response_schema=TokenSchema,
body_schema=TokenCreateSchema)
def post(self, request):
"""
---
summary: Create a new token
"""
return self.model.create_response(request)
| {
"content_hash": "e07935aecf530a72b2092e4959452b43",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 75,
"avg_line_length": 25.116666666666667,
"alnum_prop": 0.6098208360982084,
"repo_name": "quantmind/lux",
"id": "0ff774e6e31122b8712c6fc71857b23bd24573fd",
"size": "1507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lux/ext/auth/rest/tokens.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "906"
},
{
"name": "HTML",
"bytes": "5107"
},
{
"name": "JavaScript",
"bytes": "219127"
},
{
"name": "Makefile",
"bytes": "422"
},
{
"name": "Mako",
"bytes": "1050"
},
{
"name": "PLpgSQL",
"bytes": "140"
},
{
"name": "Python",
"bytes": "615221"
},
{
"name": "Shell",
"bytes": "196"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
from django.views.generic import TemplateView
admin.autodiscover()
# Examples:
# url(r'^$', 'simcon.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
urlpatterns = patterns('simcon.views',
url(r'^$', 'ResearcherView', name="Index"),
url(r'^researcherview/$', 'ResearcherView', name="ResearcherView"),
url(r'^login/$', 'login_page', name="login"),
url(r'^template-wizard/$', 'TemplateWizard', name="TemplateWizard"),
url(r'^generatelink/$', 'GenerateLink', name="GenerateLink"),
url(r'^generatelink/(?P<templateID>\d*)/$', 'GenerateLink', name="GenerateLink_with_templateID"),
url(r'^sharetemplate/$', 'ShareTemplate', name="ShareTemplate"),
url(r'^sharetemplate/(?P<templateID>\d*)/$', 'ShareTemplate', name="ShareTemplate_with_templateID"),
url(r'^shareresponse/$', 'ShareResponse', name="ShareResponse"),
url(r'^shareresponse/(?P<conversationID>\d*)/$', 'ShareResponse', name="ShareResponse_with_responseID"),
url(r'^deleteresponse/(?P<responseNum>\d*)/$', 'DeleteResponse', name="DeleteResponse_by_id"),
url(r'^links/(?P<tempID>\d*)$', 'Links', name="Links"),
url(r'^studentconvostep/$', 'StudentConvoStep', name = 'StudentConvoStep'),
url(r'^studentinfo/$', 'StudentInfo', name = 'StudentInfo'),
url(r'^student/(?P<VKey>\w{10})/$', 'StudentLogin', name = "StudentLogin"),
url(r'^postchoice/$', 'PostChoice', name = 'PostChoice'),
url(r'^posttypedresponse/$', 'PostTypedResponse', name = 'PostTypedResponse'),
url(r'^audio/save$', 'saveAudio', name="SaveAudio"),
url(r'^submission/$', 'SubmitResponse', name = 'Submit'),
url(r'^template-delete/(\d+)$', 'TemplateDelete', name="TemplateDelete"),
url(r'^template-wizard/(\d+)$', 'TemplateWizardEdit', name="TemplateWizardEdit"),
url(r'^template-wizard-save/$', 'TemplateWizardSave', name="TemplateWizardSave"),
url(r'^template-wizard-update', 'TemplateWizardUpdate', name="TemplateWizardUpdate"),
url(r'^template-wizard-left-pane', 'TemplateWizardLeftPane', name="TemplateWizardLeftPane"),
url(r'^template-wizard-right-pane', 'TemplateWizardRightPane', name="TemplateWizardRightPane"),
url(r'^template-save-in-progress', 'TemplateSaveInProgress', name="TemplateSaveInProgress"),
url(r'^template-load-in-progress/(\d+)$', 'TemplateLoadInProgress', name="TemplateLoadInProgress"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^responses/$', 'Responses', name="Responses"),
url(r'^responses/(\d+)$', 'SingleResponse', name="SingleResponse"),
url(r'logout/$', 'logout_view', name="logout"),
)
# password recovery URLs
urlpatterns += patterns('',
url(r'^user/password/reset/$',
'django.contrib.auth.views.password_reset',
{'post_reset_redirect' : '/user/password/reset/done/', 'from_email': settings.SERVER_EMAIL},
name="admin_password_reset"),
(r'^user/password/reset/done/$',
'django.contrib.auth.views.password_reset_done'),
(r'^user/password/reset/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$',
'django.contrib.auth.views.password_reset_confirm',
{'post_reset_redirect' : '/user/password/done/'}),
(r'^user/password/done/$',
'django.contrib.auth.views.password_reset_complete'),
)
## Serve static media in debug mode. TODO: make sure this works in development.
if settings.DEBUG:
urlpatterns += patterns('',
(r'^' + settings.MEDIA_URL + r'(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
#if settings.DEBUG:
# import debug_toolbar
# urlpatterns += patterns('',
# url(r'^__debug__/', include(debug_toolbar.urls)),
# )
| {
"content_hash": "f64905fd90094c9ec72590e021b932ff",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 108,
"avg_line_length": 53.47222222222222,
"alnum_prop": 0.6612987012987013,
"repo_name": "djorda9/Simulated-Conversations",
"id": "b21c78fa8263df233a8c56b952d088d79cd14f17",
"size": "3850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vagrant/simcon/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "86367"
},
{
"name": "JavaScript",
"bytes": "422785"
},
{
"name": "Python",
"bytes": "134090"
},
{
"name": "Ruby",
"bytes": "4717"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
import os
from mopidy import config, ext
__version__ = '0.1.0'
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
dist_name = 'Mopidy-NFCread'
ext_name = 'nfcread'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['devicepath'] = config.String()
return schema
def setup(self, registry):
from .frontend import NFCread
registry.add('frontend', NFCread)
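# For reference, a minimal ext.conf matching the schema above could look
# like this (the device path is a hypothetical value):
#
#   [nfcread]
#   enabled = true
#   devicepath = /dev/nfc0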
| {
"content_hash": "aa4360fbb8cc6ccbd4149d7cc0c0c6b6",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 71,
"avg_line_length": 22.548387096774192,
"alnum_prop": 0.642346208869814,
"repo_name": "gefangenimnetz/mopidy-nfcread",
"id": "d8fd06f77802f9abf9598c7a9605e292a1ddfbdd",
"size": "699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mopidy_nfcread/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "753057"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import matplotlib.image as mplimage
import matplotlib as mpl
import os
class ImageViewer(object):
def __init__(self, imfile):
self._load_image(imfile)
self._configure()
self.figure = plt.gcf()
t = "Image: {0}".format(os.path.basename(imfile))
self.figure.suptitle(t, fontsize=20)
self.shape = (3, 2)
def _configure(self):
mpl.rcParams['font.size'] = 10
mpl.rcParams['figure.autolayout'] = False
mpl.rcParams['figure.figsize'] = (9, 6)
mpl.rcParams['figure.subplot.top'] = .9
def _load_image(self, imfile):
self.im = mplimage.imread(imfile)
@staticmethod
def _get_chno(ch):
chmap = {'R': 0, 'G': 1, 'B': 2}
return chmap.get(ch, -1)
def show_channel(self, ch):
bins = 256
ec = 'none'
chno = self._get_chno(ch)
loc = (chno, 1)
ax = plt.subplot2grid(self.shape, loc)
ax.hist(self.im[:, :, chno].flatten(), bins, color=ch, ec=ec,\
label=ch, alpha=.7)
ax.set_xlim(0, 255)
plt.setp(ax.get_xticklabels(), visible=True)
plt.setp(ax.get_yticklabels(), visible=False)
plt.setp(ax.get_xticklines(), visible=True)
plt.setp(ax.get_yticklines(), visible=False)
plt.legend()
plt.grid(True, axis='y')
return ax
def show(self):
loc = (0, 0)
axim = plt.subplot2grid(self.shape, loc, rowspan=3)
axim.imshow(self.im)
plt.setp(axim.get_xticklabels(), visible=False)
plt.setp(axim.get_yticklabels(), visible=False)
plt.setp(axim.get_xticklines(), visible=False)
plt.setp(axim.get_yticklines(), visible=False)
axr = self.show_channel('R')
axg = self.show_channel('G')
axb = self.show_channel('B')
plt.show()
if __name__ == '__main__':
im = 'images/yellow_flowers.jpg'
try:
iv = ImageViewer(im)
iv.show()
except Exception as ex:
        print(ex)
| {
"content_hash": "a5ae16b5126f400b8cc85d615c7117ea",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 70,
"avg_line_length": 29.15714285714286,
"alnum_prop": 0.5658990690837825,
"repo_name": "pletisan/python-data-viz-cookbook",
"id": "ddd6d231d7d311ff3f9c6e85936fcb889feefb86",
"size": "2041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3367OS_Code/3367OS_06_Code/ch06/ch06_rec02_01_figimage.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from django.conf.urls import url, include
from whippedcream.tests.urls import v1_api, noname_api
urlpatterns = [
url(r'^api/', include(v1_api.urls)),
url(r'^apinoname/', include(noname_api.urls))
]
| {
"content_hash": "9ac8fdeaa3feef05b310b9325a4c3f58",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 54,
"avg_line_length": 26,
"alnum_prop": 0.7019230769230769,
"repo_name": "paulcwatts/django-whippedcream",
"id": "0f6aeb4c90050597ea7eeb6954573c8bd8e67bce",
"size": "208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whippedcream/runtests/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22141"
}
],
"symlink_target": ""
} |
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUUsersFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUGroup(NURESTObject):
""" Represents a Group in the VSD
Notes:
Identifies a group within an enterprise
"""
__rest_name__ = "group"
__resource_name__ = "groups"
## Constants
CONST_ROLE_ADMINOPERATOR = "ADMINOPERATOR"
CONST_ROLE_NETCONFMGR = "NETCONFMGR"
CONST_MANAGEMENT_MODE_CMS = "CMS"
CONST_ROLE_ORGNETWORKDESIGNER = "ORGNETWORKDESIGNER"
CONST_ROLE_CMS = "CMS"
CONST_ROLE_UNKNOWN = "UNKNOWN"
CONST_ROLE_PREACTIVATION = "PREACTIVATION"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ROLE_STATS = "STATS"
CONST_ROLE_CSPOPERATOR = "CSPOPERATOR"
CONST_ROLE_USER = "USER"
CONST_MANAGEMENT_MODE_RESERVED = "RESERVED"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_ROLE_JMS = "JMS"
CONST_ROLE_ORGUSER = "ORGUSER"
CONST_ROLE_CSPROOT = "CSPROOT"
CONST_ROLE_SYSTEM = "SYSTEM"
CONST_ROLE_POSTACTIVATION = "POSTACTIVATION"
CONST_ROLE_SECURITYADMINISTRATOR = "SECURITYADMINISTRATOR"
CONST_ROLE_ORGADMIN = "ORGADMIN"
CONST_MANAGEMENT_MODE_DEFAULT = "DEFAULT"
def __init__(self, **kwargs):
""" Initializes a Group instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> group = NUGroup(id=u'xxxx-xxx-xxx-xxx', name=u'Group')
>>> group = NUGroup(data=my_dict)
"""
super(NUGroup, self).__init__()
# Read/Write Attributes
self._ldap_group_dn = None
self._name = None
self._management_mode = None
self._last_updated_by = None
self._account_restrictions = None
self._description = None
self._restriction_date = None
self._entity_scope = None
self._role = None
self._private = None
self._external_id = None
self.expose_attribute(local_name="ldap_group_dn", remote_name="LDAPGroupDN", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="management_mode", remote_name="managementMode", attribute_type=str, is_required=False, is_unique=False, choices=[u'CMS', u'DEFAULT', u'RESERVED'])
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="account_restrictions", remote_name="accountRestrictions", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="restriction_date", remote_name="restrictionDate", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="role", remote_name="role", attribute_type=str, is_required=False, is_unique=False, choices=[u'ADMINOPERATOR', u'CMS', u'CSPOPERATOR', u'CSPROOT', u'JMS', u'NETCONFMGR', u'ORGADMIN', u'ORGNETWORKDESIGNER', u'ORGUSER', u'POSTACTIVATION', u'PREACTIVATION', u'SECURITYADMINISTRATOR', u'STATS', u'SYSTEM', u'UNKNOWN', u'USER'])
self.expose_attribute(local_name="private", remote_name="private", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.users = NUUsersFetcher.fetcher_with_object(parent_object=self, relationship="member")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def ldap_group_dn(self):
""" Get ldap_group_dn value.
Notes:
The LDAP distinguished name (DN) for the group.
This attribute is named `LDAPGroupDN` in VSD API.
"""
return self._ldap_group_dn
@ldap_group_dn.setter
def ldap_group_dn(self, value):
""" Set ldap_group_dn value.
Notes:
The LDAP distinguished name (DN) for the group.
This attribute is named `LDAPGroupDN` in VSD API.
"""
self._ldap_group_dn = value
@property
def name(self):
""" Get name value.
Notes:
A unique name of the group
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
A unique name of the group
"""
self._name = value
@property
def management_mode(self):
""" Get management_mode value.
Notes:
Management mode of the user object - allows for override of external authorization and syncup
This attribute is named `managementMode` in VSD API.
"""
return self._management_mode
@management_mode.setter
def management_mode(self, value):
""" Set management_mode value.
Notes:
Management mode of the user object - allows for override of external authorization and syncup
This attribute is named `managementMode` in VSD API.
"""
self._management_mode = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def account_restrictions(self):
""" Get account_restrictions value.
Notes:
Determines whether group is disabled or not.
This attribute is named `accountRestrictions` in VSD API.
"""
return self._account_restrictions
@account_restrictions.setter
def account_restrictions(self, value):
""" Set account_restrictions value.
Notes:
Determines whether group is disabled or not.
This attribute is named `accountRestrictions` in VSD API.
"""
self._account_restrictions = value
@property
def description(self):
""" Get description value.
Notes:
Description of the group
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of the group
"""
self._description = value
@property
def restriction_date(self):
""" Get restriction_date value.
Notes:
When the group was disabled.
This attribute is named `restrictionDate` in VSD API.
"""
return self._restriction_date
@restriction_date.setter
def restriction_date(self, value):
""" Set restriction_date value.
Notes:
When the group was disabled.
This attribute is named `restrictionDate` in VSD API.
"""
self._restriction_date = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def role(self):
""" Get role value.
Notes:
The role associated with this group.
"""
return self._role
@role.setter
def role(self, value):
""" Set role value.
Notes:
The role associated with this group.
"""
self._role = value
@property
def private(self):
""" Get private value.
Notes:
A private group is visible only by the owner of the group. Public groups are visible by all users in the enterprise
"""
return self._private
@private.setter
def private(self, value):
""" Set private value.
Notes:
A private group is visible only by the owner of the group. Public groups are visible by all users in the enterprise
"""
self._private = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
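# Hedged usage sketch (the fetcher calls follow general bambou
# conventions and are assumptions, not part of this generated file):
#
#   group = NUGroup(id=u'xxxx-xxx-xxx-xxx')
#   group.fetch()                # populate attributes from the VSD
#   members = group.users.get()  # member fetcher declared in __init__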
| {
"content_hash": "1fb8d7f3d604e159aa820996462c6ffe",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 364,
"avg_line_length": 27.327710843373495,
"alnum_prop": 0.5600035270258354,
"repo_name": "nuagenetworks/vspk-python",
"id": "8c29a30d76d38b8a4721b8551da297dba88e459c",
"size": "12954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vspk/v5_0/nugroup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12909327"
}
],
"symlink_target": ""
} |
from __future__ import division
from auto_gen_objects import Object, Property, Choice
SQL_TYPE = 'sql'
class SQLObject(Object):
@staticmethod
def convert(obj):
if SQL_TYPE in obj.layouts:
obj.__class__ = SQLObject
for prop in obj.properties:
prop.__class__ = SQLProperty
for choice in obj.choices:
choice.__class__ = SQLChoice
for prop in choice.properties:
prop.__class__ = SQLProperty
def getName(self):
try:
return self.layouts[SQL_TYPE]['table']
except KeyError:
pass
return Object.getName(self)
def getSQLFields(self):
return self.getSQLProperties() + self.getSQLChoices()
def getSQLProperties(self):
return [p for p in self.properties if p.hasSpec()]
def getSQLChoices(self):
return [c for c in self.choices if c.hasSpec()]
def getSQLInverses(self):
return [f for f in self.getSQLFields() if f.isInverse()]
def getSQLInverseRefs(self):
return [f for f in self.getSQLFields()
if f.isInverse() and f.isReference()]
def getSQLColumns(self):
return [f.getColumn() for f in self.getSQLFields()]
def getSQLReferences(self):
return ([p for p in self.properties
if p.isReference() and not p.isInverse()] +
[c for c in self.choices
if c.isReference() and not c.isInverse()])
def getNormalSQLColumns(self):
return [p for p in self.properties if not p.isInverse() and \
not p.isPrimaryKey() and not p.isReference() and \
p.hasSpec()]
def getNormalSQLColumnsAndKey(self):
return self.getNormalSQLColumns() + [self.getKey()]
def getSQLConstructorPairs(self):
return [(f.getRegularName(), f.getRegularName())
for f in self.getNormalSQLColumnsAndKey()]
    def getSQLColumnsAndKey(self):
        return [p.getColumn() for p in self.getNormalSQLColumnsAndKey()]
def getSQLReferenceProperties(self):
return [p for p in self.properties
if not p.isInverse() and p.isReference()]
def getSQLForeignKeys(self):
return [p for p in self.properties
if p.isInverse() and p.isForeignKey()]
def getSQLReferencedField(self, refObj):
if refObj is not None:
# find inverse
for refProp in refObj.properties:
if refProp.isReference() and \
refProp.isInverse() and \
refProp.getReference() == self.getRegularName():
return (refProp, False)
for choice in refObj.choices:
                for refProp in choice.getSQLProperties():
if refProp.isReference() and \
refProp.getReference() == self.getRegularName():
return (choice, True)
return (None, False)
def get_sql_referenced(self, ref_obj, inverse=False):
if ref_obj is not None:
for ref_prop in ref_obj.properties:
if not (inverse ^ ref_prop.isInverse()) and \
ref_prop.isReference() and \
ref_prop.getReference() == self.getRegularName():
return (ref_prop, False)
for choice in ref_obj.choices:
if inverse ^ choice.isInverse():
continue
for ref_prop in choice.properties:
if ref_prop.isReference() and \
ref_prop.getReference() == self.getRegularName():
return (choice, True)
raise RuntimeError("didn't work", ref_obj.getRegularName(),
self.getRegularName())
class SQLProperty (Property):
    def hasSpec(self):
        return SQL_TYPE in self.specs
def getName(self):
try:
return self.specs[SQL_TYPE]['name']
except KeyError:
pass
return Property.getName(self)
def getColumn(self):
try:
return self.specs[SQL_TYPE]['column']
except KeyError:
pass
return self.getName()
def getType(self):
try:
return self.specs[SQL_TYPE]['type']
except KeyError:
pass
return 'int'
def getGlobalName(self):
try:
return self.specs[SQL_TYPE]['globalName']
except KeyError:
pass
return ''
    def isText(self):
        type_upper = self.getType().upper()
        return 'CHAR' in type_upper or 'DATE' in type_upper
def isAutoInc(self):
try:
# FIXME include "and isPrimaryKey()" ?
return self.specs[SQL_TYPE]['autoInc'] == 'true' and \
self.isPrimaryKey()
except KeyError:
pass
return False
def isGlobal(self):
try:
return self.specs[SQL_TYPE]['global'] == 'true'
except KeyError:
pass
return False
class SQLChoice(Choice):
    def hasSpec(self):
        return self.properties[0].hasSpec()
def getSpec(self):
if self.hasSpec():
return self.properties[0]
return None
    def getColumn(self):
        for prop in self.properties:
            if prop.hasSpec():
                try:
                    return prop.specs[SQL_TYPE]['column']
                except KeyError:
                    break
        return self.getName()
def isGlobal(self):
try:
return self.properties[0].specs[SQL_TYPE]['global'] == 'true'
except KeyError:
pass
return False
def getGlobalName(self):
try:
return self.properties[0].specs[SQL_TYPE]['globalName']
except KeyError:
pass
return ''
def getSQLProperties(self):
return [p for p in self.properties if p.hasSpec()]
def convert(objects):
sql_objects = []
for obj in objects:
if SQL_TYPE in obj.layouts:
SQLObject.convert(obj)
sql_objects.append(obj)
return sql_objects
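# Hedged usage sketch (the parse step is hypothetical; only convert()
# above is real): filter parsed domain objects down to those that carry
# a SQL layout and therefore gain the SQL-specific accessors:
#
#   sql_objects = convert(parse_xml('specs.xml'))
#   for obj in sql_objects:
#       print(obj.getName(), [f.getColumn() for f in obj.getSQLFields()])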
| {
"content_hash": "9ac7dacdb02ca624b6502748c8737085",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 77,
"avg_line_length": 31.129186602870814,
"alnum_prop": 0.5431909007070397,
"repo_name": "hjanime/VisTrails",
"id": "ed472410070ef4b6672cd17722dbe3e176183091",
"size": "8420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/db/bin/sql_gen_objects.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
quotas = Table('quotas', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
instance_metadata_table = Table('instance_metadata', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
#
# New columns
#
quota_metadata_items = Column('metadata_items', Integer())
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
for table in (instance_metadata_table, ):
try:
table.create()
except Exception:
LOG.info(repr(table))
LOG.exception('Exception while creating table')
raise
quotas.create_column(quota_metadata_items)
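def downgrade(migrate_engine):
    # Hedged sketch of the inverse operation (sqlalchemy-migrate
    # convention; this function is an assumption, not part of the
    # original migration): drop the added column, then the new table.
    meta.bind = migrate_engine
    quotas.drop_column(quota_metadata_items)
    instance_metadata_table.drop()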
| {
"content_hash": "d2fc86826b448016e285aa3a6eccbc0d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 31.65573770491803,
"alnum_prop": 0.6343863283272916,
"repo_name": "russellb/nova",
"id": "a5c25b876ad536b36916a2234738be521330968a",
"size": "2612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4974"
},
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "5611148"
},
{
"name": "Shell",
"bytes": "25380"
}
],
"symlink_target": ""
} |
from collections import Counter
from typing import List
import torch
def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]):
"""
Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy).
Args:
roberta (RobertaHubInterface): RoBERTa instance
bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)`
other_tokens (List[str]): other tokens of shape `(T_words)`
Returns:
List[str]: mapping from *other_tokens* to corresponding *bpe_tokens*.
"""
assert bpe_tokens.dim() == 1
assert bpe_tokens[0] == 0
def clean(text):
return text.strip()
# remove whitespaces to simplify alignment
bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens]
bpe_tokens = [clean(roberta.bpe.decode(x) if x not in {'<s>', ''} else x) for x in bpe_tokens]
other_tokens = [clean(str(o)) for o in other_tokens]
# strip leading <s>
bpe_tokens = bpe_tokens[1:]
assert ''.join(bpe_tokens) == ''.join(other_tokens)
# create alignment from every word to a list of BPE tokens
alignment = []
bpe_toks = filter(lambda item: item[1] != '', enumerate(bpe_tokens, start=1))
j, bpe_tok = next(bpe_toks)
for other_tok in other_tokens:
bpe_indices = []
while True:
if other_tok.startswith(bpe_tok):
bpe_indices.append(j)
other_tok = other_tok[len(bpe_tok):]
try:
j, bpe_tok = next(bpe_toks)
except StopIteration:
j, bpe_tok = None, None
elif bpe_tok.startswith(other_tok):
# other_tok spans multiple BPE tokens
bpe_indices.append(j)
bpe_tok = bpe_tok[len(other_tok):]
other_tok = ''
else:
raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok))
if other_tok == '':
break
assert len(bpe_indices) > 0
alignment.append(bpe_indices)
assert len(alignment) == len(other_tokens)
return alignment
def align_features_to_words(roberta, features, alignment):
"""
Align given features to words.
Args:
roberta (RobertaHubInterface): RoBERTa instance
features (torch.Tensor): features to align of shape `(T_bpe x C)`
alignment: alignment between BPE tokens and words returned by
func:`align_bpe_to_words`.
"""
assert features.dim() == 2
bpe_counts = Counter(j for bpe_indices in alignment for j in bpe_indices)
assert bpe_counts[0] == 0 # <s> shouldn't be aligned
denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))])
weighted_features = features / denom.unsqueeze(-1)
output = [weighted_features[0]]
largest_j = -1
for bpe_indices in alignment:
output.append(weighted_features[bpe_indices].sum(dim=0))
largest_j = max(largest_j, *bpe_indices)
for j in range(largest_j + 1, len(features)):
output.append(weighted_features[j])
output = torch.stack(output)
assert torch.all(torch.abs(output.sum(dim=0) - features.sum(dim=0)) < 1e-4)
return output
def spacy_nlp():
if getattr(spacy_nlp, '_nlp', None) is None:
try:
from spacy.lang.en import English
spacy_nlp._nlp = English()
except ImportError:
raise ImportError('Please install spacy with: pip install spacy')
return spacy_nlp._nlp
def spacy_tokenizer():
if getattr(spacy_tokenizer, '_tokenizer', None) is None:
try:
nlp = spacy_nlp()
spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp)
except ImportError:
raise ImportError('Please install spacy with: pip install spacy')
return spacy_tokenizer._tokenizer
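# A small self-contained illustration of the greedy alignment performed
# by align_bpe_to_words above (tokens hypothetical; no RoBERTa instance
# involved):
#
#   bpe_tokens:   ["To", "ken", "ization", "rocks"]
#   other_tokens: ["Tokenization", "rocks"]
#   -> alignment == [[1, 2, 3], [4]]
#
# Each word greedily consumes BPE pieces (indices start at 1 because the
# leading <s> token is stripped) until its characters are exhausted.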
| {
"content_hash": "931db5fc4d653b1575d31ed20521b1b3",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 98,
"avg_line_length": 35.41818181818182,
"alnum_prop": 0.606776180698152,
"repo_name": "hfp/libxsmm",
"id": "45d2e37194c0f66e2b063884d7f3291ae48ece0f",
"size": "4074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/deeplearning/sparse_training/fairseq/fairseq/models/roberta/alignment_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3115"
},
{
"name": "C",
"bytes": "8335143"
},
{
"name": "C++",
"bytes": "84416"
},
{
"name": "CSS",
"bytes": "242"
},
{
"name": "Fortran",
"bytes": "102021"
},
{
"name": "HTML",
"bytes": "390"
},
{
"name": "JavaScript",
"bytes": "1062"
},
{
"name": "Makefile",
"bytes": "158870"
},
{
"name": "Python",
"bytes": "36612"
},
{
"name": "Shell",
"bytes": "84205"
},
{
"name": "Starlark",
"bytes": "882"
}
],
"symlink_target": ""
} |
from distutils import cygwinccompiler
from distutils import extension
from distutils import util
import errno
import os
import os.path
import pkg_resources
import platform
import re
import shlex
import shutil
import sys
import sysconfig
import setuptools
from setuptools.command import build_ext
# TODO(atash) add flag to disable Cython use
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.abspath('.'))
import protoc_lib_deps
import grpc_version
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
]
PY3 = sys.version_info.major == 3
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support.
BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
# We can also use these variables as a way to inject environment-specific
# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
# reasonable default.
EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
if EXTRA_ENV_COMPILE_ARGS is None:
EXTRA_ENV_COMPILE_ARGS = '-std=c++11'
if 'win32' in sys.platform:
if sys.version_info < (3, 5):
# We use define flags here and don't directly add to DEFINE_MACROS below to
# ensure that the expert user/builder has a way of turning it off (via the
# envvars) without adding yet more GRPC-specific envvars.
# See https://sourceforge.net/p/mingw-w64/bugs/363/
if '32' in platform.architecture()[0]:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s -D_hypot=hypot'
else:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64 -D_hypot=hypot'
else:
# We need to statically link the C++ Runtime, only the C runtime is
# available dynamically
EXTRA_ENV_COMPILE_ARGS += ' /MT'
elif "linux" in sys.platform or "darwin" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -fno-wrapv -frtti'
if EXTRA_ENV_LINK_ARGS is None:
EXTRA_ENV_LINK_ARGS = ''
if "linux" in sys.platform or "darwin" in sys.platform:
EXTRA_ENV_LINK_ARGS += ' -lpthread'
elif "win32" in sys.platform and sys.version_info < (3, 5):
msvcr = cygwinccompiler.get_msvcr()[0]
# TODO(atash) sift through the GCC specs to see if libstdc++ can have any
# influence on the linkage outcome on MinGW for non-C++ programs.
EXTRA_ENV_LINK_ARGS += (
' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
'-static'.format(msvcr=msvcr))
EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
CC_FILES = [os.path.normpath(cc_file) for cc_file in protoc_lib_deps.CC_FILES]
PROTO_FILES = [
os.path.normpath(proto_file) for proto_file in protoc_lib_deps.PROTO_FILES
]
CC_INCLUDE = os.path.normpath(protoc_lib_deps.CC_INCLUDE)
PROTO_INCLUDE = os.path.normpath(protoc_lib_deps.PROTO_INCLUDE)
GRPC_PYTHON_TOOLS_PACKAGE = 'grpc_tools'
GRPC_PYTHON_PROTO_RESOURCES_NAME = '_proto'
DEFINE_MACROS = ()
if "win32" in sys.platform:
DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1),)
if '64bit' in platform.architecture()[0]:
DEFINE_MACROS += (('MS_WIN64', 1),)
elif "linux" in sys.platform or "darwin" in sys.platform:
DEFINE_MACROS += (('HAVE_PTHREAD', 1),)
# By default, Python3 distutils enforces compatibility of
# c plugins (.so files) with the OSX version Python3 was built with.
# For Python3.4, this is OSX 10.6, but we need Thread Local Support (__thread)
if 'darwin' in sys.platform and PY3:
mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if mac_target and (pkg_resources.parse_version(mac_target) <
pkg_resources.parse_version('10.9.0')):
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
r'macosx-[0-9]+\.[0-9]+-(.+)', r'macosx-10.9-\1',
util.get_platform())
def package_data():
tools_path = GRPC_PYTHON_TOOLS_PACKAGE.replace('.', os.path.sep)
proto_resources_path = os.path.join(tools_path,
GRPC_PYTHON_PROTO_RESOURCES_NAME)
proto_files = []
for proto_file in PROTO_FILES:
source = os.path.join(PROTO_INCLUDE, proto_file)
target = os.path.join(proto_resources_path, proto_file)
relative_target = os.path.join(GRPC_PYTHON_PROTO_RESOURCES_NAME,
proto_file)
try:
os.makedirs(os.path.dirname(target))
except OSError as error:
if error.errno == errno.EEXIST:
pass
else:
raise
shutil.copy(source, target)
proto_files.append(relative_target)
return {GRPC_PYTHON_TOOLS_PACKAGE: proto_files}
def extension_modules():
if BUILD_WITH_CYTHON:
plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.pyx')]
else:
plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.cpp')]
plugin_sources += [
os.path.join('grpc_tools', 'main.cc'),
os.path.join('grpc_root', 'src', 'compiler', 'python_generator.cc')
] + [os.path.join(CC_INCLUDE, cc_file) for cc_file in CC_FILES]
plugin_ext = extension.Extension(
name='grpc_tools._protoc_compiler',
sources=plugin_sources,
include_dirs=[
'.',
'grpc_root',
os.path.join('grpc_root', 'include'),
CC_INCLUDE,
],
language='c++',
define_macros=list(DEFINE_MACROS),
extra_compile_args=list(EXTRA_COMPILE_ARGS),
extra_link_args=list(EXTRA_LINK_ARGS),
)
extensions = [plugin_ext]
if BUILD_WITH_CYTHON:
from Cython import Build
return Build.cythonize(extensions)
else:
return extensions
setuptools.setup(
name='grpcio-tools',
version=grpc_version.VERSION,
description='Protobuf code generator for gRPC',
author='The gRPC Authors',
author_email='grpc-io@googlegroups.com',
url='https://grpc.io',
license='Apache License 2.0',
classifiers=CLASSIFIERS,
ext_modules=extension_modules(),
packages=setuptools.find_packages('.'),
install_requires=[
'protobuf>=3.5.0.post1',
'grpcio>={version}'.format(version=grpc_version.VERSION),
],
package_data=package_data(),
)
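# Hedged usage note: regenerating the extension sources with Cython and
# building in place typically looks like this (command assumed, not part
# of this file):
#
#   GRPC_PYTHON_BUILD_WITH_CYTHON=1 python setup.py build_ext --inplace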
| {
"content_hash": "182b27c179b170b92b5721688a1c5fd1",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 118,
"avg_line_length": 38.857142857142854,
"alnum_prop": 0.6497821350762527,
"repo_name": "thinkerou/grpc",
"id": "c13dfe9ade589aa2cf4fc11ec256eee87cace511",
"size": "7922",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/distrib/python/grpcio_tools/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "31177"
},
{
"name": "C",
"bytes": "1651907"
},
{
"name": "C#",
"bytes": "1723819"
},
{
"name": "C++",
"bytes": "29784655"
},
{
"name": "CMake",
"bytes": "548015"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "141732"
},
{
"name": "Go",
"bytes": "27069"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "6907"
},
{
"name": "JavaScript",
"bytes": "51278"
},
{
"name": "M4",
"bytes": "45321"
},
{
"name": "Makefile",
"bytes": "1143240"
},
{
"name": "Objective-C",
"bytes": "284919"
},
{
"name": "Objective-C++",
"bytes": "37952"
},
{
"name": "PHP",
"bytes": "468767"
},
{
"name": "Python",
"bytes": "2408483"
},
{
"name": "Ruby",
"bytes": "963396"
},
{
"name": "Shell",
"bytes": "403820"
},
{
"name": "Swift",
"bytes": "3435"
},
{
"name": "XSLT",
"bytes": "9673"
}
],
"symlink_target": ""
} |
import os
from cabot.cabotapp.alert import AlertPlugin
from cabot.plugin_test_utils import PluginTestCase
from mock import patch, call
from cabot.cabotapp.models import Service, UserProfile
from cabot_alert_pagerduty import models
class TestPagerdutyAlerts(PluginTestCase):
def setUp(self):
super(TestPagerdutyAlerts, self).setUp()
self.alert = AlertPlugin.objects.get(title=models.PagerdutyAlert.name)
self.service.alerts.add(self.alert)
self.service.save()
self.plugin = models.PagerdutyAlert.objects.get()
# self.user's service key is user_key
models.PagerdutyAlertUserData.objects.create(user=self.user.profile, service_key='user_key')
def test_critical_alertable(self):
""" A service with a critical status is alertable """
self.service.overall_status = self.service.CRITICAL_STATUS
self.assertTrue(self.plugin._service_alertable(self.service))
def test_non_critical_alertable(self):
""" A non-critical service status does not alert """
for status in Service.WARNING_STATUS, Service.ERROR_STATUS:
self.service.overall_status = status
self.assertFalse(self.plugin._service_alertable(self.service))
def test_default_critical_status(self):
os.environ.pop('PAGERDUTY_ALERT_STATUS', None)
default_alert_status = ['CRITICAL']
self.assertEqual(default_alert_status, models._gather_alertable_status())
def test_default_status_in_plugin(self):
os.environ.pop('PAGERDUTY_ALERT_STATUS', None)
default_alert_status = ['CRITICAL']
self.assertEqual(default_alert_status, self.plugin.alert_status_list)
def test_configured_status_in_plugin(self):
os.environ['PAGERDUTY_ALERT_STATUS'] = 'CRITICAL,WARNING'
custom_alert_status = ['CRITICAL', 'WARNING']
self.assertEqual(custom_alert_status, self.plugin.alert_status_list)
@patch('cabot_alert_pagerduty.models.pygerduty.PagerDuty')
def test_trigger_and_resolve(self, fake_client_class):
resolve_incident = fake_client_class.return_value.resolve_incident
trigger_incident = fake_client_class.return_value.trigger_incident
self.transition_service_status(Service.PASSING_STATUS, Service.CRITICAL_STATUS)
trigger_incident.assert_called_once_with('user_key', 'Service: Service is CRITICAL',
incident_key='service/2194')
self.transition_service_status(Service.CRITICAL_STATUS, Service.PASSING_STATUS)
resolve_incident.assert_called_once_with('user_key', 'service/2194')
@patch('cabot_alert_pagerduty.models.pygerduty.PagerDuty')
def test_alert_multiple_keys(self, fake_client_class):
trigger_incident = fake_client_class.return_value.trigger_incident
# self.fallback_officer's key is fallback_key, alert self.user and self.fallback_officer
models.PagerdutyAlertUserData.objects.create(user=self.fallback_officer.profile, service_key='fallback_key')
self.service.users_to_notify.add(self.fallback_officer)
self.transition_service_status(Service.PASSING_STATUS, Service.CRITICAL_STATUS)
trigger_incident.assert_has_calls([
call('user_key', 'Service: Service is CRITICAL', incident_key='service/2194'),
call('fallback_key', 'Service: Service is CRITICAL', incident_key='service/2194'),
])
@patch('cabot_alert_pagerduty.models.pygerduty.PagerDuty')
def test_missing_profile(self, fake_client_class):
"""this is an oddly specific test case, but there was a bug"""
trigger_incident = fake_client_class.return_value.trigger_incident
# self.user has a key, but duty officer has no userdata *or profile*
# should just gather keys from self.user
UserProfile.objects.filter(user=self.duty_officer).delete()
self.service.users_to_notify.add(self.duty_officer)
self.transition_service_status(Service.PASSING_STATUS, Service.CRITICAL_STATUS)
trigger_incident.assert_has_calls([
call('user_key', 'Service: Service is CRITICAL', incident_key='service/2194'),
])
| {
"content_hash": "34bc02a709c629f01f8fee8e52c16c25",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 116,
"avg_line_length": 47.70454545454545,
"alnum_prop": 0.6986660314435446,
"repo_name": "Affirm/cabot-alert-pagerduty",
"id": "5f2a68ca55ca6c06dcd9fa3c5478066a8ec9d4c4",
"size": "4222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cabot_alert_pagerduty/tests/test_pagerduty.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9164"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_service_status import V1ServiceStatus
class TestV1ServiceStatus(unittest.TestCase):
""" V1ServiceStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ServiceStatus(self):
"""
Test V1ServiceStatus
"""
model = kubernetes.client.models.v1_service_status.V1ServiceStatus()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "572613846fb400c5fb7c94ee3ba45ff0",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 105,
"avg_line_length": 21.05,
"alnum_prop": 0.6912114014251781,
"repo_name": "skuda/client-python",
"id": "50bc07885ce1feea14be3d07840dd417ca788ee3",
"size": "859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/test/test_v1_service_status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
} |
"""
Purpose
Demonstrate basic message operations in Amazon Simple Queue Service (Amazon SQS).
"""
# snippet-start:[python.example_code.sqs.message_wrapper_imports]
import logging
import sys
import boto3
from botocore.exceptions import ClientError
import queue_wrapper
logger = logging.getLogger(__name__)
sqs = boto3.resource('sqs')
# snippet-end:[python.example_code.sqs.message_wrapper_imports]
# snippet-start:[python.example_code.sqs.SendMessage]
def send_message(queue, message_body, message_attributes=None):
"""
Send a message to an Amazon SQS queue.
:param queue: The queue that receives the message.
:param message_body: The body text of the message.
:param message_attributes: Custom attributes of the message. These are key-value
pairs that can be whatever you want.
:return: The response from SQS that contains the assigned message ID.
"""
if not message_attributes:
message_attributes = {}
try:
response = queue.send_message(
MessageBody=message_body,
MessageAttributes=message_attributes
)
except ClientError as error:
logger.exception("Send message failed: %s", message_body)
raise error
else:
return response
# snippet-end:[python.example_code.sqs.SendMessage]
# snippet-start:[python.example_code.sqs.SendMessageBatch]
def send_messages(queue, messages):
"""
Send a batch of messages in a single request to an SQS queue.
This request may return overall success even when some messages were not sent.
The caller must inspect the Successful and Failed lists in the response and
resend any failed messages.
:param queue: The queue to receive the messages.
:param messages: The messages to send to the queue. These are simplified to
contain only the message body and attributes.
:return: The response from SQS that contains the list of successful and failed
messages.
"""
try:
entries = [{
'Id': str(ind),
'MessageBody': msg['body'],
'MessageAttributes': msg['attributes']
} for ind, msg in enumerate(messages)]
response = queue.send_messages(Entries=entries)
if 'Successful' in response:
for msg_meta in response['Successful']:
logger.info(
"Message sent: %s: %s",
msg_meta['MessageId'],
messages[int(msg_meta['Id'])]['body']
)
if 'Failed' in response:
for msg_meta in response['Failed']:
logger.warning(
"Failed to send: %s: %s",
msg_meta['MessageId'],
messages[int(msg_meta['Id'])]['body']
)
except ClientError as error:
logger.exception("Send messages failed to queue: %s", queue)
raise error
else:
return response
# snippet-end:[python.example_code.sqs.SendMessageBatch]
# snippet-start:[python.example_code.sqs.ReceiveMessage]
def receive_messages(queue, max_number, wait_time):
"""
Receive a batch of messages in a single request from an SQS queue.
:param queue: The queue from which to receive messages.
:param max_number: The maximum number of messages to receive. The actual number
of messages received might be less.
:param wait_time: The maximum time to wait (in seconds) before returning. When
this number is greater than zero, long polling is used. This
can result in reduced costs and fewer false empty responses.
:return: The list of Message objects received. These each contain the body
of the message and metadata and custom attributes.
"""
try:
messages = queue.receive_messages(
MessageAttributeNames=['All'],
MaxNumberOfMessages=max_number,
WaitTimeSeconds=wait_time
)
for msg in messages:
logger.info("Received message: %s: %s", msg.message_id, msg.body)
except ClientError as error:
logger.exception("Couldn't receive messages from queue: %s", queue)
raise error
else:
return messages
# snippet-end:[python.example_code.sqs.ReceiveMessage]
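# Illustrative usage (not part of the original sample): a minimal
# poll-and-delete loop built from the wrappers in this module. The queue name
# is an assumption for demonstration only.
#
#   queue = queue_wrapper.create_queue('demo-queue')
#   for msg in receive_messages(queue, max_number=10, wait_time=5):
#       print(msg.body)
#       delete_message(msg)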
# snippet-start:[python.example_code.sqs.DeleteMessage]
def delete_message(message):
"""
Delete a message from a queue. Clients must delete messages after they
are received and processed to remove them from the queue.
:param message: The message to delete. The message's queue URL is contained in
the message's metadata.
:return: None
"""
try:
message.delete()
logger.info("Deleted message: %s", message.message_id)
except ClientError as error:
logger.exception("Couldn't delete message: %s", message.message_id)
raise error
# snippet-end:[python.example_code.sqs.DeleteMessage]
# snippet-start:[python.example_code.sqs.DeleteMessageBatch]
def delete_messages(queue, messages):
"""
Delete a batch of messages from a queue in a single request.
:param queue: The queue from which to delete the messages.
:param messages: The list of messages to delete.
:return: The response from SQS that contains the list of successful and failed
message deletions.
"""
try:
entries = [{
'Id': str(ind),
'ReceiptHandle': msg.receipt_handle
} for ind, msg in enumerate(messages)]
response = queue.delete_messages(Entries=entries)
if 'Successful' in response:
for msg_meta in response['Successful']:
logger.info("Deleted %s", messages[int(msg_meta['Id'])].receipt_handle)
if 'Failed' in response:
for msg_meta in response['Failed']:
logger.warning(
"Could not delete %s",
messages[int(msg_meta['Id'])].receipt_handle
)
except ClientError:
logger.exception("Couldn't delete messages from queue %s", queue)
else:
return response
# snippet-end:[python.example_code.sqs.DeleteMessageBatch]
# snippet-start:[python.example_code.sqs.Scenario_SendReceiveBatch]
def usage_demo():
"""
Shows how to:
* Read the lines from this Python file and send the lines in
batches of 10 as messages to a queue.
* Receive the messages in batches until the queue is empty.
* Reassemble the lines of the file and verify they match the original file.
"""
def pack_message(msg_path, msg_body, msg_line):
return {
'body': msg_body,
'attributes': {
'path': {'StringValue': msg_path, 'DataType': 'String'},
'line': {'StringValue': str(msg_line), 'DataType': 'String'}
}
}
def unpack_message(msg):
return (msg.message_attributes['path']['StringValue'],
msg.body,
int(msg.message_attributes['line']['StringValue']))
print('-'*88)
print("Welcome to the Amazon Simple Queue Service (Amazon SQS) demo!")
print('-'*88)
queue = queue_wrapper.create_queue('sqs-usage-demo-message-wrapper')
with open(__file__) as file:
lines = file.readlines()
line = 0
batch_size = 10
received_lines = [None]*len(lines)
print(f"Sending file lines in batches of {batch_size} as messages.")
while line < len(lines):
messages = [pack_message(__file__, lines[index], index)
for index in range(line, min(line + batch_size, len(lines)))]
line = line + batch_size
send_messages(queue, messages)
print('.', end='')
sys.stdout.flush()
print(f"Done. Sent {len(lines) - 1} messages.")
print(f"Receiving, handling, and deleting messages in batches of {batch_size}.")
more_messages = True
while more_messages:
received_messages = receive_messages(queue, batch_size, 2)
print('.', end='')
sys.stdout.flush()
for message in received_messages:
path, body, line = unpack_message(message)
received_lines[line] = body
if received_messages:
delete_messages(queue, received_messages)
else:
more_messages = False
print('Done.')
if all([lines[index] == received_lines[index] for index in range(len(lines))]):
print(f"Successfully reassembled all file lines!")
else:
print(f"Uh oh, some lines were missed!")
queue.delete()
print("Thanks for watching!")
print('-'*88)
# snippet-end:[python.example_code.sqs.Scenario_SendReceiveBatch]
if __name__ == '__main__':
usage_demo()
| {
"content_hash": "dac253bd607b081be96c1d23b4502dd2",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 87,
"avg_line_length": 35.87755102040816,
"alnum_prop": 0.6237770193401593,
"repo_name": "awsdocs/aws-doc-sdk-examples",
"id": "035b204d6d2fede0e0af4261fece4617c327350d",
"size": "8898",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/example_code/sqs/message_wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "476653"
},
{
"name": "Batchfile",
"bytes": "900"
},
{
"name": "C",
"bytes": "3852"
},
{
"name": "C#",
"bytes": "2051923"
},
{
"name": "C++",
"bytes": "943634"
},
{
"name": "CMake",
"bytes": "82068"
},
{
"name": "CSS",
"bytes": "33378"
},
{
"name": "Dockerfile",
"bytes": "2243"
},
{
"name": "Go",
"bytes": "1764292"
},
{
"name": "HTML",
"bytes": "319090"
},
{
"name": "Java",
"bytes": "4966853"
},
{
"name": "JavaScript",
"bytes": "1655476"
},
{
"name": "Jupyter Notebook",
"bytes": "9749"
},
{
"name": "Kotlin",
"bytes": "1099902"
},
{
"name": "Makefile",
"bytes": "4922"
},
{
"name": "PHP",
"bytes": "1220594"
},
{
"name": "Python",
"bytes": "2507509"
},
{
"name": "Ruby",
"bytes": "500331"
},
{
"name": "Rust",
"bytes": "558811"
},
{
"name": "Shell",
"bytes": "63776"
},
{
"name": "Swift",
"bytes": "267325"
},
{
"name": "TypeScript",
"bytes": "119632"
}
],
"symlink_target": ""
} |
import collections
from .. import converters
from .. import namespace
from .. import option
#------------------------------------------------------------------------------
def setup_definitions(source, destination):
for key, val in source.items():
if key.startswith('__'):
continue # ignore these
val_type = type(val)
if val_type == option.Option:
destination[key] = val
if not val.name:
val.name = key
val.set_value(val.default)
elif isinstance(val, collections.Mapping):
if 'name' in val and 'default' in val:
# this is an Option in the form of a dict, not a Namespace
params = converters.str_dict_keys(val)
destination[key] = option.Option(**params)
else:
# this is a Namespace
if key not in destination:
try:
destination[key] = namespace.Namespace(doc=val._doc)
except AttributeError:
destination[key] = namespace.Namespace()
# recurse!
setup_definitions(val, destination[key])
elif val_type in [int, float, str, unicode]:
destination[key] = option.Option(name=key,
doc=key,
default=val)
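# Illustrative usage (not part of the original module): a source mapping that
# mixes a bare value, an Option-shaped dict, and a nested namespace; every
# name below is an assumption for demonstration only.
#
#   source = {
#       'host': 'localhost',
#       'port': {'name': 'port', 'default': 5432, 'doc': 'db port'},
#       'db': {'user': 'admin'},
#   }
#   destination = {}
#   setup_definitions(source, destination)
#   # destination now holds Option objects for 'host' and 'port', and a
#   # Namespace for 'db' containing an Option for 'user'.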
| {
"content_hash": "08cb97b5b5dcf8d48b2ca469729e4f7f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 40.42857142857143,
"alnum_prop": 0.48197879858657244,
"repo_name": "twobraids/configman_orginal",
"id": "9428577c4968a7302a6533dc5f34999345b9612e",
"size": "3128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configman/def_sources/for_mappings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "257212"
}
],
"symlink_target": ""
} |
from termcolor import colored
from time import strftime, localtime
class OutputResult:
OK = ('+', 'green', ['bold'])
Fail = ('-', 'red', ['bold'])
Info = ('i', 'blue', ['bold'])
Warn = ('WRN', 'yellow', ['bold'])
Error = ('ERR', 'red', ['bold'])
Log = ('LOG', None, ['bold'])
class Output:
logging = False
quiet = False
date_format = "%x %X %z"
log_file = None
file_resource = None
@staticmethod
def ok(message: str, use_time: bool = False):
Output.do(message, OutputResult.OK, use_time)
@staticmethod
def fail(message: str, use_time: bool = False):
Output.do(message, OutputResult.Fail, use_time)
@staticmethod
def warn(message: str, use_time: bool = False):
Output.do(message, OutputResult.Warn, use_time)
@staticmethod
def err(message: str, use_time: bool = False):
Output.do(message, OutputResult.Error, use_time)
@staticmethod
def do(message: str, result: tuple = OutputResult.Info, use_time: bool = False, ret: bool = False):
if Output.log_file and Output.file_resource is None:
Output.file_resource = open(Output.log_file, 'w+')
if Output.quiet:
return
date_time = ""
if use_time:
date_time = '[%s]' % strftime(Output.date_format, localtime())
message_type = '[%s]' % colored(result[0], result[1], attrs=result[2])
message_result = '%s%s %s' % (message_type, date_time, message)
if Output.log_file:
message_result_text = '[%s]%s %s' % (result[0], date_time, message)
Output.file_resource.write(message_result_text + '\n')
if ret:
return message_result
print(message_result)
@staticmethod
def log(message: str):
if Output.quiet:
return
if Output.logging:
Output.do(message, result=OutputResult.Log)
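# Illustrative usage (not part of the original module): enable file logging
# and emit a few messages; the log path below is an assumption.
#
#   Output.log_file = '/tmp/desefu.log'
#   Output.ok("Module loaded", use_time=True)
#   Output.warn("Low disk space")
#   Output.err("Unreadable sector")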
| {
"content_hash": "7ba42fbfaf3d5ee36a2115b94a021494",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 103,
"avg_line_length": 28,
"alnum_prop": 0.5791925465838509,
"repo_name": "vdjagilev/desefu",
"id": "59b38e76931e1011528abca220acaa9370c33282",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kernel/output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44309"
}
],
"symlink_target": ""
} |
import argparse
import sys
class AtomicMutation(object):
"""Base class for mutations that focus on one thing."""
def __init__(self, action="forward"):
self.action = action
def __call__(self, arguments=sys.argv[1:], program=sys.argv[0]):
parser = argparse.ArgumentParser(prog=program,
description=self.__class__.__doc__)
self.arguments = parser.parse_args(arguments)
action = getattr(self, self.action)
try:
action()
except:
raise
sys.exit(0)
def forward(self):
"""Upgrade."""
raise NotImplementedError()
def backward(self):
"""Downgrade."""
raise NotImplementedError()
def forward_smoketest(self):
"""Quickly assert that forward expectations are met."""
def forward_diagnostic(self):
"""Check forward expectations and produce detailed diagnostic."""
| {
"content_hash": "08c80d06b8e37e3653bdcc166e6f7daf",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 28.90909090909091,
"alnum_prop": 0.5859538784067087,
"repo_name": "mozilla-services/transmutator",
"id": "2118aecf4c38a5c85f0c7dd5c8ff346a19b31005",
"size": "954",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "transmutator/mutations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17370"
}
],
"symlink_target": ""
} |
"""
dbspatial.py: SQLite-SpatiaLite support for db.py
Copyright (c) 2017 Garin Wally
MIT License; see LICENSE
Forked from dslw in November 2017
"""
import fnmatch
import inspect
import os
import re
import sys
import urllib2
from sqlite3 import IntegrityError, OperationalError
import numpy as np
import pandas as pd
import geopandas as gpd
import sqlparse
from openpyxl import load_workbook
from shapely.wkt import loads as wkt_loads
import db
# Turn off scientific notation for the love of GOD!!!
np.set_printoptions(suppress=True)
# TODO: rm pd.set_option('display.float_format', lambda x: '%.4f' % x)
_meta_prefixes = [
u'ElementaryGeometries',
u'SpatialIndex',
u'gcau',
u'gcfi',
u'gcs',
u'gctm',
u'geom',
u'geometry',
u'ggi',
u'ggu',
u'gid', #
u'gii', #
u'giu', #
u'idx',
u'spatial',
u'spatialite',
u'sql',
u'sqlite',
u'tmd',
u'tmi',
u'tmu',
u'vector',
u'views',
u'virts',
u'vtgc',
u'vtgcau',
u'vtgcfi',
u'vtgcs',
u'vwgc',
u'vwgcau',
u'vwgcfi',
u'vwgcs'
]
class _Config(object):
def __init__(self):
self.LOWERCASE_FIELDS = True
self.AUTO_SPATIAL_INDEX = True
self._initial_dec_len = 4
@property
def decimal_length(self):
return self._initial_dec_len
@decimal_length.setter
def decimal_length(self, length):
if not isinstance(length, int):
raise AttributeError("Length must be int")
self._initial_dec_len = length
s = "%.{}f".format(self._initial_dec_len)
pd.set_option('display.float_format', lambda x: s % x)
return
config = _Config()
class ToggleVerbose(object):
def __init__(self, verbose=False):
"""Toggles sys.stderr from printing to console vs to void.
Args:
verbose (bool): whether to print to console or not (default False)
"""
# To write to console or not?
self.verbose = verbose
# Whatever the current sys.stderr is set to
self.original = sys.stderr
# A null buffer i.e. the void to write to
self.void = open(os.devnull, "w")
if not self.verbose:
self.toggle_off()
def toggle_off(self):
"""Turns writing to console off."""
if self.verbose is False:
sys.stderr = self.void
return
def toggle_on(self):
"""Turns writing to console on."""
if self.verbose is False:
sys.stderr = self.original
return
def __enter__(self):
self.toggle_off()
return
def __exit__(self, exc_type, exc_val, exc_tb):
self.toggle_on()
return
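# Illustrative usage (a sketch, not part of the original module): silence
# stderr around a noisy call, either via the context manager or manually. The
# chatty_function name is an assumption.
#
#   with ToggleVerbose(verbose=False):
#       chatty_function()   # stderr writes land in os.devnull here
#   # stderr is restored once the block exits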
def _clean_columns(df):
# Replace whitespace with underscores
df.columns = [re.sub("\W", "_", col) for col in df.columns.tolist()]
# Column names that start with numbers cause SQL Errors
if any([col[0].isdigit() for col in df.columns]):
raise KeyError("Dataframe column names cannot begin with numbers")
# TODO: Raise exception on keyword field name e.g. insert
# Lowercase field names (set in config; default True)
if config.LOWERCASE_FIELDS:
df.columns = [col.lower() for col in df.columns]
return
# TODO: add to namespace class?
def AddGeometryColumn(table, srid, shape, geom_col='geometry', dim='XY',
enforce_null=False):
"""Builds an AddGeometryColumn SQL statement.
Args:
table (str): table to add geometry column to
srid (int): spatial reference id
shape (str): shape type (e.g. Point, PolyLine)
geom_col (str): name of new geometry column (default: 'geometry')
        dim (str): coordinate dimension model, e.g. 'XY' or 'XYZ' (default: 'XY')
enforce_null (int or bool): enforce NOT NULL constraint (default False)
Returns SQL query (unicode string).
"""
add_qry = "SELECT AddGeometryColumn('{0}', '{1}', {2}, '{3}', '{4}', {5});"
dim = dim.replace("'", "")
add_qry = add_qry.format(
table, geom_col, srid, shape, dim, int(enforce_null))
return unicode(add_qry)
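# Illustrative example (not part of the original module): for a hypothetical
# 'parcels' table in EPSG:4326 the builder yields the following statement.
#
#   AddGeometryColumn('parcels', 4326, 'MULTIPOLYGON')
#   # -> "SELECT AddGeometryColumn('parcels', 'geometry', 4326, 'MULTIPOLYGON', 'XY', 0);"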
def get_proj4(srid, authority="esri"):
"""Gets proj4 text via SRID from spatialreference.org.
Args:
srid (int): Spatial Reference ID.
authority (str, optional): 'epsg', 'sr-org', or 'esri' (default);
automatically tries 'epsg' if 'esri' causes 404 error.
Available as scalar 'InsertSrid'.
"""
base_url = "http://spatialreference.org/ref/{}/{}/proj4/"
url = base_url.format(authority, srid)
try:
return unicode(urllib2.urlopen(url).read().decode("utf8"))
except urllib2.HTTPError:
# Try again with epsg
authority = "epsg"
url = base_url.format(authority, srid)
return unicode(urllib2.urlopen(url).read().decode("utf8"))
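# Illustrative example (not part of the original module): requires network
# access since it queries spatialreference.org; the exact text returned may
# differ slightly from what is shown here.
#
#   get_proj4(4326, authority="epsg")
#   # -> u'+proj=longlat +datum=WGS84 +no_defs '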
class SpatialDB(db.DB):
def __init__(self, filename, verbose=True):
"""SpatiaLite database."""
self.verbose = verbose
with ToggleVerbose(self.verbose):
db.DB.__init__(self, filename=filename, dbtype="sqlite",
extensions=["mod_spatialite"])
self.con.isolation_level = None
# TODO: if metadata_tables...
self.cur.execute("SELECT InitSpatialMetaData(1);")
sys.stderr.write("Spatial Metadata Intialized.\n")
# TODO: self._create_additial_metadata()
self.update_schema()
#self.create_function("RegisterSpatialView", self.RegisterSpatialView)
self.create_function("ExportXLSX", self.export_excel)
def load_extended_functions(self):
pass # TODO:
#for ...
    def _create_additional_metadata(self):
"""Creates tables that are not part of the standard metadata tables."""
# Dimensions
self.cur.execute("""CREATE TABLE IF NOT EXISTS dimensions (
dim_name, dimensions);""")
dims = [
("XY", 2),
("XYZ", 3),
("XYM", 3),
("XYZM", 4)]
self.cur.executemany(
"INSERT OR IGNORE INTO dimensions VALUES (?, ?);", dims)
# Geometry Types
self.cur.execute("""CREATE TABLE IF NOT EXISTS geometry_types (
type_name, typeid, dimensions);""")
        # Type ids follow the SpatiaLite geometry_columns convention:
        # base type id, +1000 for XYZ, +2000 for XYM, +3000 for XYZM.
        gtypes = [
            ("POINT", 1, 2),
            ("POINTZ", 1001, 3),
            ("POINTM", 2001, 3),
            ("POINTZM", 3001, 4),
            ("LINESTRING", 2, 2),
            ("LINESTRINGZ", 1002, 3),
            ("LINESTRINGM", 2002, 3),
            ("LINESTRINGZM", 3002, 4),
            ("POLYGON", 3, 2),
            ("POLYGONZ", 1003, 3),
            ("POLYGONM", 2003, 3),
            ("POLYGONZM", 3003, 4),
            ("MULTIPOINT", 4, 2),
            ("MULTIPOINTZ", 1004, 3),
            ("MULTIPOINTM", 2004, 3),
            ("MULTIPOINTZM", 3004, 4),
            ("MULTILINESTRING", 5, 2),
            ("MULTILINESTRINGZ", 1005, 3),
            ("MULTILINESTRINGM", 2005, 3),
            ("MULTILINESTRINGZM", 3005, 4),
            ("MULTIPOLYGON", 6, 2),
            ("MULTIPOLYGONZ", 1006, 3),
            ("MULTIPOLYGONM", 2006, 3),
            ("MULTIPOLYGONZM", 3006, 4),
            ("GEOMETRYCOLLECTION", 7, 2),
            ("GEOMETRYCOLLECTIONZ", 1007, 3),
            ("GEOMETRYCOLLECTIONM", 2007, 3),
            ("GEOMETRYCOLLECTIONZM", 3007, 4),
            ("GEOMETRY", 0, 2),
            ("GEOMETRYZ", 1000, 3),
            ("GEOMETRYM", 2000, 3),
            ("GEOMETRYZM", 3000, 4)]
self.cur.executemany(
"INSERT OR IGNORE INTO geometry_types VALUES (?, ?, ?);", gtypes)
return
def create_function(self, name, func): # TODO: make decorator
"""Smart-Alias for self.con.create_function."""
args = inspect.getargspec(func).args
if "self" in args:
args.remove("self")
num_args = len(args)
with ToggleVerbose(self.verbose):
return self.con.create_function(name, num_args, func)
def create_index(self, table, column):
"""A method to create spatial or non-spatial indexes."""
with ToggleVerbose(self.verbose):
if config.LOWERCASE_FIELDS:
table = table.lower()
column = column.lower()
if column.lower() != "geometry":
return self.cur.execute(
"CREATE INDEX idx_{tbl}_{col} ON {tbl} ({col});".format(
tbl=table, col=column)).fetchone()
elif column == "geometry" and table in self.geo_tables:
return self.cur.execute(
"SELECT CreateSpatialIndex(?, 'geometry')",
(table,)).fetchone()
else:
return 0
def execute_script_file(self, filename, data=None):
"""Alias for self.cur.executescript(); supports handlebars queries."""
with open(filename) as f:
q = f.read()
if data:
q = self._apply_handlebars(q, data, True)
with ToggleVerbose(self.verbose):
return self.cur.executescript(q)
def insert_srid(self, srid, authority="esri"):
"""INSERTs sr data into spatial_ref_sys from spatialreference.org.
Args:
srid (int): Spatial Reference ID to INSERT.
authority (str, optional): 'epsg', 'sr-org', or 'esri' (default);
automatically tries 'epsg' if 'esri' causes 404 error.
Available as scalar 'InsertSrid'.
"""
base_url = "http://spatialreference.org/ref/{}/{}/postgis/"
url = base_url.format(authority, srid)
try:
insert = unicode(urllib2.urlopen(url).read().decode("utf8"))
except urllib2.HTTPError:
# Try again with epsg
authority = "epsg"
url = base_url.format(authority, srid)
insert = unicode(urllib2.urlopen(url).read().decode("utf8"))
except urllib2.URLError as e:
print("Check internet connection.")
raise e
name = re.findall('PROJCS\["(.*)",GEOGCS', insert)[0].replace("_", " ")
try:
with ToggleVerbose(self.verbose):
self.cur.execute(insert) # ESRI's DLL gave me a BusyError...
except IntegrityError:
return False
update_sql = ("UPDATE spatial_ref_sys SET ref_sys_name = ? "
"WHERE auth_srid = ?")
# It looks like PostGIS adds a 9 to the srid, so we need to change that
update_srid = ("UPDATE spatial_ref_sys SET srid = auth_srid "
"WHERE auth_name = ?")
with ToggleVerbose(self.verbose):
self.cur.execute(update_sql, (name, srid))
self.cur.execute(update_srid, (authority,))
return True
def has_srid(self, srid):
"""Checks spatial_ref_sys table for input srid.
Args:
srid (int): spatial reference id to check if exists
Available as scalar 'HasSrid'.
"""
check = "SELECT ref_sys_name FROM spatial_ref_sys WHERE auth_srid = ?"
if not self.cur.execute(check, (srid,)).fetchone():
return False
return True
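    # Illustrative usage (not part of the original module): ensure a reference
    # system exists before loading data; 'sdb' and the SRID are assumptions.
    #
    #   if not sdb.has_srid(2256):
    #       sdb.insert_srid(2256)  # fetched from spatialreference.org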
def geo_query(self, query, data=None):
"""Return spatial query as a geopandas.GeoDataFrame."""
# Format the query
query = sqlparse.format(query, reindent=True, keyword_case='upper')
# Add these for user's convenience
if "wkt" not in query and "srid" not in query:
# Get location of 'FROM'
idx = query.find("FROM")
# Part to add
s = ", AsText(geometry) AS wkt, SRID(geometry) AS srid "
# Insert wkt and srid statements into query before 'FROM'
query = query[:idx] + s + query[idx:]
if "wkt" not in query:
raise AttributeError(
"Query does not return geometry as 'wkt' field")
if "srid" not in query:
raise AttributeError("Query does not return 'srid' field")
# Apply handlebars, eg. fill in {{these}} with values
if data:
query = self._apply_handlebars(query, data, True)
# NULL geometries will raise WindowsError
try:
df = pd.read_sql(query, self.con)
# `WindowsError: exception: access violation reading 0x00000000`
except WindowsError: # TODO: not working
raise OperationalError("NULL geometries cannot be displayed")
if df.empty:
raise AttributeError("Query returned no results")
# Drop Shape, Shape_len, etc.
        for shp_col in [c for c in df.columns
                        if re.findall("(?i)Shape|geometry", c)]:
            df.drop(shp_col, axis=1, inplace=True)
# Make geometry column as WKT that is loaded as shapely object(s)
df["geometry"] = df["wkt"].apply(wkt_loads)
df.drop("wkt", axis=1, inplace=True)
# Convert pandas.DataFrame to geopandas.GeoDataFrame
gdf = gpd.GeoDataFrame(df)
# Get SRID
srid = list(set(df["srid"]))[0]
df.drop("srid", axis=1, inplace=True)
gdf.crs = get_proj4(srid)
return gdf
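    # Illustrative usage (not part of the original module): the wkt and srid
    # helper columns are appended automatically when the query lacks them;
    # 'sdb' and the table name are assumptions for demonstration only.
    #
    #   gdf = sdb.geo_query("SELECT id, geometry FROM parcels;")
    #   gdf.plot()  # geopandas GeoDataFrame with crs set from the srid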
def geo_query_from_file(self, filename, data=None):
"""Execute query file; supports handlebars queries."""
with open(filename) as f:
q = f.read()
if data:
q = self._apply_handlebars(q, data, True)
return self.geo_query(q)
def get_columns(self, table):
"""Returns a list of column names for a specified table."""
return self.query(
"PRAGMA table_info({{tbl}});",
data={"tbl": table})["name"].tolist()
def get_column_type(self, table_name, column_name):
pragma = "PRAGMA table_info({{tbl}})"
df = self.query(pragma, data={"tbl": table_name})
return df.set_index("name").ix[column_name]["type"]
def find_metadata_table(self, search):
"""List table names within the database.
Returns a sorted list of tables initalized by SpatiaLite.
"""
# Get all table names from current db
return fnmatch.filter(self.metadata_tables, search)
@property
def all_tables(self):
q = "SELECT name FROM sqlite_master WHERE type IN ('table', 'view');"
tables = self.query(q)["name"].tolist()
tables = list(set(tables).difference(self.metadata_tables))
tables.sort()
return tables
@property
def metadata_tables(self): # TODO: return as prettytable
# col_meta, table_meta = self._get_db_metadata(False, False)
# tables = self._gen_tables_from_col_tuples(col_meta)
tables = [r[0] for r in
self.cur.execute("SELECT name FROM sqlite_master;")
if r[0].split("_")[0] in _meta_prefixes]
tables.sort()
return tables
@property
def geometries(self):
"""Returns a dict of spatial tables and their properties."""
gd = self.query("SELECT * FROM geometry_columns")
return gd.set_index("f_table_name").to_dict('index')
@property
def geo_tables(self):
"""A list of spatial table names."""
return self.geometries.keys()
@property
def geo_views(self):
"""A list of spatial view names."""
return self.query(
("SELECT view_name "
"FROM views_geometry_columns"))["view_name"].tolist()
# =========================================================================
# SELF-ALTERATION METHODS
def RegisterSpatialView(self, view_name, view_geometry, f_table_name,
f_geometry_column, view_rowid):
"""Register a spatial view.
Args:
view_name (str): name of view to register
view_geometry (str): geometry column name of view
f_table_name (str): name of table the view references to get geom
f_geometry_column (str): name of geometry column in f_table
Available as scalar 'RegisterSpatialView'.
"""
# Resource: www.gaia-gis.it/spatialite-3.0.0-BETA/spatialite-cookbook
# /html/sp-view.html
sql = ("INSERT INTO views_geometry_columns "
" (view_name, view_geometry, view_rowid, f_table_name, "
" f_geometry_column, read_only) "
" VALUES ('{}', '{}', '{}', '{}', '{}', 1);")
sql = unicode(sql.format(view_name, view_geometry, view_rowid,
f_table_name, f_geometry_column))
self.cur.execute(sql)
code = self.cur.fetchone()
return code
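    # Illustrative usage (not part of the original module): create a plain SQL
    # view over a spatial table, then register it so SpatiaLite clients see
    # its geometry; all names below are assumptions for demonstration only.
    #
    #   sdb.cur.execute("CREATE VIEW big_parcels AS "
    #                   "SELECT ROWID AS rowid, * FROM parcels "
    #                   "WHERE area > 100;")
    #   sdb.RegisterSpatialView('big_parcels', 'geometry', 'parcels',
    #                           'geometry', 'rowid')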
def drop_table(self, table_name):
"""Method to drop a table (and related indexes)."""
r = self.cur.execute("SELECT DropGeoTable(?)",
(table_name,)).fetchone()
self.cur.execute("VACUUM")
self.update_schema()
return r
# TODO: too slow, use CloneTable or something
# TODO: somehow rename inplace
def rename_fields(self, table_name, old_new_names_dict):
"""Renames a table's fields using a dictionary."""
if type(old_new_names_dict) is not dict:
raise TypeError(
"Must use dictionary to rename fields. {} used.".format(
str(type(old_new_names_dict))))
gdf = self.geo_query(
"SELECT * FROM {{tbl}};", data={"tbl": table_name})
gdf.rename(columns=old_new_names_dict, inplace=True)
self.drop_table(table_name)
self.load_features(table_name, gdf=gdf)
return
def drop_fields(self, table_name, drop_list=[], keep_list=[]):
"""Uses CloneTable to drop fields."""
clone_name = "{}_1".format(table_name)
# Get existing column names
columns = self.get_columns(table_name)
# Get the difference of what to keep and what exists
if keep_list:
            # Keeping the geometry column is implicit when using keep_fields
keep_list.append("geometry")
drop_list = filter(
lambda x: x if x not in keep_list and x in columns else False,
columns)
# Just check that what was supplied exists
else:
drop_list = filter(
lambda x: x if x in columns else False, drop_list)
# Make a string of what to drop
# e.g. "'::ignore::field1', '::ignore::field2'"
drops = ", ".join(
["'::ignore::{}'".format(drop) for drop in drop_list])
# Clone the table performing the field drops
self.cur.execute(
"SELECT CloneTable('main', ?, ?, 0, ?);",
(table_name, clone_name, drops))
# Stop everything if the clone doesn't exist
if clone_name not in self.all_tables:
raise OperationalError("Table not cloned :(")
# Stop everything if the clone table is empty
# TODO: make function?
try:
self.query(
"SELECT * FROM {{tbl}} LIMIT 1",
data={"tbl": table_name}).iloc[0]
except IndexError:
raise OperationalError("No records in clone")
# Drop the original
self.cur.execute("SELECT DropGeoTable(?);", (table_name,))
# Rename the new table by cloning the new one with the original name
self.cur.execute(
"SELECT CloneTable('main', ?, ?, 0);",
(clone_name, table_name))
# Drop the old clone
self.cur.execute("SELECT DropGeoTable(?);", (clone_name,))
return
# =========================================================================
# I/O METHODS
def load_dataframe(self, table_name, dataframe):
"""Imports a pandas.DataFrame into SQLite."""
with ToggleVerbose(self.verbose):
df = dataframe.copy()
_clean_columns(df)
if "wkt" in df.columns:
df["geometry"] = df["wkt"].apply(wkt_loads)
df.drop("wkt", axis=1, inplace=True)
srid = df["srid"].ix[0]
df = gpd.GeoDataFrame(df)
self.load_features(table_name, gdf=df, srid=srid)
return
df.to_sql(table_name, self.con, if_exists="replace", index=False)
del df
self.update_schema()
return
def load_excel(self, path, worksheet=0, table_name="", sheet_name=""):
"""Imports an .xlsx or .xls sheet into SQLite.
Args:
path (str): path to Excel file; table name defaults to file name
worksheet (int): worksheet (number) to import
table_name (str): optionally rename the imported sheet
sheet_name (str): import sheet by name rather than number
Available as scalar 'ImportExcel'.
"""
# Use sheet_name to lookup worksheet number
if sheet_name:
xl = pd.ExcelFile(path)
worksheet = xl.sheet_names.index(sheet_name)
xl.close()
# Read xlsx as dataframe
df = pd.read_excel(path, worksheet)
# Output table name
if not table_name:
if sheet_name:
table_name = sheet_name
else:
table_name = os.path.basename(path).split(".")[0]
# Column names
self.load_dataframe(table_name, df)
return
def load_features(self, table_name, path="", gdf=None, srid='DEFAULT',
if_exists="replace"):
"""Loads a file or GeoDataFrame into SpatiaLite via geopandas."""
with ToggleVerbose(self.verbose):
if path:
gdf = gpd.read_file(path)
elif gdf is None:
raise AttributeError("Either a path or GeoDataFrame is required.")
_clean_columns(gdf)
if srid == "DEFAULT":
if "+proj" in gdf.crs:
srid = self.query(
("SELECT auth_srid "
"FROM spatial_ref_sys "
"WHERE proj4text = '{{proj4}}'"),
data={"proj4": gdf.crs}).ix[0][0]
elif type(gdf.crs) is dict:
srid = int(re.findall("\d+", gdf.crs["init"])[0])
else:
raise AttributeError("SRID not found. Please supply.")
# Remove NULL geometries
gdf = gdf[~gdf["geometry"].isnull()]
# Assume the longest is the appropriate geometry type
# e.g. MultiPolygon > Polygon
geom_type = max([t for t in list(set(gdf["geometry"].geom_type))],
key=len)
# Convert to WKT
gdf["wkt"] = gdf["geometry"].apply(lambda x: x.to_wkt())
# Drop Shapely field
gdf.drop("geometry", axis=1, inplace=True)
if "srid" in gdf.columns:
gdf.drop("srid", axis=1, inplace=True)
tmp_table = table_name + "_tmp"
# Pandas tries to handle this, but with spatial tables and indexes
# involved, we might as well beat em to the punch and do it right
if if_exists.lower() == "replace" and table_name in self.geo_tables:
self.cur.execute(
"SELECT DropGeoTable('{}');".format(table_name))
gdf.to_sql(tmp_table, self.con, if_exists=if_exists, index=False)
# Re-spatialize
sys.stderr.write("Creating {}; geom_type {}; srid {}\n".format(
table_name, geom_type, srid))
add_q = AddGeometryColumn(tmp_table, srid, geom_type)
# TODO: is this duplicating most of self.recast_table?
try:
e = None
self.cur.execute(add_q)
update_q = """
UPDATE {tbl}
SET geometry = MakeValid(
CastTo{gtype}(GeomFromText(wkt, {srid})))"""
self.cur.execute(
update_q.format(tbl=tmp_table, gtype=geom_type, srid=srid))
# Clone temp table without 'wkt' field
self.cur.execute("SELECT CloneTable('main', ?, ?, 1, "
"'::ignore::wkt');", (tmp_table, table_name))
except Exception as e:
pass
finally:
self.cur.execute("SELECT DropGeoTable(?)", (tmp_table,))
if e:
raise e
self.update_schema()
if config.AUTO_SPATIAL_INDEX:
self.cur.execute(
"SELECT CreateSpatialIndex(?, 'geometry')", (table_name,))
self.cur.execute("VACUUM")
return
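    # Illustrative usage (not part of the original module): load a shapefile
    # through geopandas, or pass an existing GeoDataFrame; 'sdb', the path,
    # and some_geodataframe are assumptions for demonstration only.
    #
    #   sdb.load_features('roads', path='data/roads.shp')
    #   sdb.load_features('roads_copy', gdf=some_geodataframe, srid=4326)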
# TODO: add reprojection & SRID setting
def recast_geometry(self, table_name, geom_type="AUTO", dim="XY",
# TODO: srid="AUTO"
drop_fields=[], drop_table=True, rename=""):
"""Replaces a table with recasted (type/dimension) and validated geometry."""
# Get column names
cols = self.query(
"SELECT * FROM {{tbl}}",
data={"tbl": table_name}).columns.tolist()
# Don't include 'geometry' in query
cols.pop([col.lower() for col in cols].index("geometry"))
# Query to get cleaned and valid geometry
geom_q = """
SELECT
{{cols}},
SRID(geometry) AS srid,
AsText(
CastTo{{geometry_type}}(
CastTo{{coord_dimension}}(
MakeValid(geometry)))) AS wkt
FROM {{tbl}}
WHERE geometry IS NOT NULL;"""
# Get attributes of the current geometry column
attr_q = """
SELECT
f_geometry_column,
geometry_type,
coord_dimension
FROM geometry_columns
WHERE f_table_name='{{tbl}}';"""
# Convert dataframe to dict of geom attrs
attrs = self.query(attr_q, data={"tbl": table_name}).iloc[0].to_dict()
# Add table name to dict
attrs["tbl"] = table_name
# Add string-ified column name list to dict
attrs["cols"] = ", ".join(cols)
# Update attr values to user values
if geom_type != "AUTO":
attrs["geometry_type"] = geom_type
if dim != "AUTO":
attrs["coord_dimension"] = dim
# Run the query
clean_gdf = self.geo_query(geom_q, data=attrs)
# Drop from database
if drop_table and (rename != table_name and rename != ""):
self.cur.execute("SELECT DropGeoTable('{}');".format(table_name))
# Set new name
if rename != "":
table_name = rename
# Load the cleaned table
self.load_features(table_name, gdf=clean_gdf)
return
# TODO: change order to match SpatiaLite Reference; filename then table
def import_shp(self, table_name, path, charset="UTF-8", srid=-1):
"""Wraps SpatiaLite's ImportSHP() for importing shp file to table."""
# TODO: read contents of .cpg file if exists
import_q = "SELECT ImportSHP(?, ?, ?, ?);"
if path.endswith(".shp"):
path = path.replace(".shp", "")
        # Try to intelligently get the srid from the prj file, if it exists
        prj = (path + ".prj").replace("\\", "/")
if os.path.exists(prj):
with open(prj, "r") as f:
# Parse contents for ref_sys_name
ref_sys_name = re.findall(
'(?<=")(.*?)(?="\,)',
f.read())[0].replace("_", " ")
srid_q = "SELECT srid FROM spatial_ref_sys WHERE ref_sys_name = ?;"
srid = self.cur.execute(srid_q, (ref_sys_name,)).fetchone()[0]
        # Intelligently get the charset from the .cpg file, if it exists
        cpg = (path + ".cpg").replace("\\", "/")
if os.path.exists(cpg):
with open(cpg, "r") as f:
charset = f.read()
# Load it baby
with ToggleVerbose(self.verbose):
self.cur.execute(import_q, (path, table_name, charset, srid))
return self.cur.fetchone()
def import_dbf(self, filename, table, charset="UTF-8"):
"""Wraps SpatiaLite's ImportDBF() for importing dbf files to table."""
return self.cur.execute(
*SpatiaLite.ImportDBF(filename, table, charset))
def export_shp(self, table, out_path, charset="UTF-8"):
"""Wraps SpatiaLite's ExportSHP() for exporting a table to shp."""
q = "SELECT ExportSHP(?, ?, ?, ?);"
if out_path.endswith(".shp"):
out_path = out_path.replace(".shp", "")
geom_col = self.geometries[table]["f_geometry_column"]
with ToggleVerbose(self.verbose):
self.cur.execute(q, (table, geom_col, out_path, charset))
return self.cur.fetchone()
def export_dbf(self, table_name, out_path):
"""Wraps SpatiaLite's ExporttDBF() for exporting a table to dbf."""
self.cur.execute(*SpatiaLite.ExportDBF(table_name, out_path))
return
def export_excel(self, table, out_path, sheet_name='Sheet1'):
"""Uses pandas to export Excel files/sheets."""
writer = pd.ExcelWriter(out_path, engine='openpyxl')
if os.path.exists(out_path):
book = load_workbook(out_path)
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
self.get_dataframe(table).to_excel(
writer,
sheet_name=sheet_name,
index=False)
writer.save()
return
def export_excel_sheets(self, tables, out_path, strip_view=True):
"""Uses pandas to export Excel files/sheets.
Args:
tables (list): list of tables to export to Excel sheets
out_path (str): output path of new XLSX file
strip_view (bool): removes 'view' from table/view names
"""
writer = pd.ExcelWriter(out_path, engine='xlsxwriter')
for tbl in tables:
sheet_name = tbl
if strip_view:
sheet_name = re.sub("(?i)_?view_?", "", tbl)
self.get_dataframe(tbl).to_excel(
writer, sheet_name=sheet_name, index=False)
writer.save()
return
# =========================================================================
# RETURN/DISPLAY METHODS
def get_dataframe(self, table_name):
"""Get a non-spatial table as a pandas.DataFrame."""
try: # TODO: sort of stupid to even try this...
df = getattr(self.tables, table_name).all()
except AttributeError:
df = self.query("SELECT * FROM {{tbl}};", data={"tbl": table_name})
return df
def table_to_geodataframe(self, table_name):
"""Return a SpatiaLite table as a geopandas.GeoDataFrame."""
return self.query_to_geodataframe(
("SELECT *, "
" AsText(geometry) AS wkt, "
" SRID(geometry) AS srid "
"FROM {{tbl}}"),
{"tbl": table_name})
def update_schema(self):
"""Alias for _create_sqlite_metatable and refresh_schema."""
with ToggleVerbose(self.verbose):
try:
self._create_sqlite_metatable()
except: # TODO: compile SQLite with RTree
pass
self.refresh_schema()
return
class SpatiaLite(object):
"""Wrap SpatiaLite I/O functions as static methods.
Args:
connection (db.DB): database object
Returns a tuple of: (SQL statement with ?'s, (Args,))
Use:
I/O using geopandas is preferred when possible.
>>> database.cur.execute(*SpatiaLite.ExportDBF(<args>))
Note:
We break PEP8 with TitleMethods to stay consistant with SpatiaLite.
"""
# TODO: use doc strings from website
# TODO: def ImportSHP(self, filename, table, charset="UTF-8", srid=0,
# geom_column="", pk_column="", geometry_type="", coerce2D=0,
# compressed=0, spatial_index=0, text_dates=0):
# TODO: get geometry_type, srid, etc from geometry_columns via self.con
# TODO: def ExportSHP(table, geom_column, filename,
# charset="UTF-8", geom_type=""):
@staticmethod
def ImportDBF(filename, table, charset="UTF-8",
pk_col="DEFAULT", text_dates=0):
"""SpatiaLite function: ImportDBF."""
# TODO: read contents of .cpg file if exists
if pk_col == "DEFAULT" and text_dates == 0:
return ("SELECT ImportDBF(?, ?, ?);", (filename, table, charset))
return ("SELECT ImportDBF(?, ?, ?, ?, ?);",
(filename, table, charset, pk_col, text_dates))
@staticmethod
def ExportDBF(table, filename, charset="UTF-8"):
"""SpatiaLite function: ExportDBF."""
return ("SELECT ExportDBF(?, ?, ?);", (table, filename, charset))
@staticmethod
def ExportKML(table, geo_column, filename,
precision=8, name_column="", description=""):
"""SpatiaLite function: ExportKML."""
return ("SELECT ExportKML(?, ?, ?, ?, ?, ?);",
(table, geo_column, filename, precision,
name_column, description))
# TODO: def ExportGeoJSON(self, table, geo_column, filename,
# format="withShortCRS", precision=8):
@staticmethod
    def ImportXLS(filename, table,
                  worksheet_index=0, first_line_titles=0):
"""SpatiaLite function: ImportXLS."""
return ("SELECT ImportXLS(?, ?, ?, ?);",
(filename, table, worksheet_index, first_line_titles))
# TODO: Import/Export DXF (AutoCAD)
| {
"content_hash": "8590885cb1050e2297f4fd37eafcf024",
"timestamp": "",
"source": "github",
"line_count": 908,
"max_line_length": 85,
"avg_line_length": 37.62775330396476,
"alnum_prop": 0.5490838845636012,
"repo_name": "WindfallLabs/dslw",
"id": "b0493ba82829b5271e5beb9e7c46a7c3d54cc1c7",
"size": "34212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dslw/dbspatial/dbspatial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "22074"
},
{
"name": "PLpgSQL",
"bytes": "5750908"
},
{
"name": "Python",
"bytes": "297261"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import pytest
from copy import deepcopy
from .forms import SimpleForm
from webob.multidict import MultiDict
from wtforms import TextField
from wtforms.validators import InputRequired
from wtforms_dynamic_fields import WTFormsDynamicFields
""" This test module uses PyTest (py.test command) for its testing. """
@pytest.fixture(scope="module")
def setup(request):
""" Initiate the basic POST mockup. """
post = MultiDict()
post.add(u'first_name',u'John')
post.add(u'last_name',u'Doe')
return post
# Below follow the actual tests
def test_add_single_field_without_validation(setup):
""" Test correct re-injection of single field by WTForms
No sets - No error situation.
Fields email has no validator and this is invalid.
It should be present after validation.
"""
post = deepcopy(setup)
post.add(u'email', '')
dynamic_form = WTFormsDynamicFields()
dynamic_form.add_field('email','Email', TextField)
form = dynamic_form.process(SimpleForm,
post)
assert form.email() == '<input id="email" name="email" type="text" value="">'
assert form.email.label.text == 'Email'
def test_add_single_field_with_validation_error(setup):
""" Test correct re-injection of single field by WTForms
No sets - Error situation.
Fields email is invalid thus should trigger an error
after validation and be present in the form.
"""
post = deepcopy(setup)
post.add(u'email', '')
dynamic_form = WTFormsDynamicFields()
dynamic_form.add_field('email','Email', TextField)
dynamic_form.add_validator('email', InputRequired)
form = dynamic_form.process(SimpleForm,
post)
assert form.validate() == False
assert form.errors['email'] == ['This field is required.']
def test_add_single_field_with_validation_success(setup):
""" Test correct re-injection of single field by WTForms
No sets - No error situation.
Fields email is valid and should be present in the form
after validation.
"""
post = deepcopy(setup)
post.add(u'email', 'foo')
dynamic_form = WTFormsDynamicFields()
dynamic_form.add_field('email','Email', TextField)
dynamic_form.add_validator('email', InputRequired)
form = dynamic_form.process(SimpleForm,
post)
assert form.validate() == True
assert form.email() == '<input id="email" name="email" type="text" value="foo">'
def test_sets_of_single_fields(setup):
""" Test correct re-injection of multiple fields by WTForms
Sets - No error situation.
Fields email_x are valid and should be present in
the form after validation.
"""
post = deepcopy(setup)
post.add(u'email_1', 'one@mail.mock')
post.add(u'email_2', 'two@mail.mock')
post.add(u'email_3', 'three@mail.mock')
dynamic_form = WTFormsDynamicFields()
dynamic_form.add_field('email','Email', TextField)
dynamic_form.add_validator('email', InputRequired)
form = dynamic_form.process(SimpleForm,
post)
assert form.validate() == True
assert form.email_1.data == 'one@mail.mock'
assert form.email_2.data == 'two@mail.mock'
assert form.email_3.data == 'three@mail.mock'
assert form.email_1() == '<input id="email_1" name="email_1" type="text" value="one@mail.mock">'
assert form.email_2() == '<input id="email_2" name="email_2" type="text" value="two@mail.mock">'
assert form.email_3() == '<input id="email_3" name="email_3" type="text" value="three@mail.mock">'
def test_sets_of_multiple_single_fields(setup):
""" Test correct re-injection of multiple sets by WTForms
Sets - No error situation.
Fields email_x and telephone_x are valid and should be
present in the form after validation.
"""
post = deepcopy(setup)
post.add(u'email_1', 'one@mail.mock')
post.add(u'email_2', 'two@mail.mock')
post.add(u'email_3', 'three@mail.mock')
post.add(u'telephone_1', '14564678')
post.add(u'telephone_2', '64578952')
post.add(u'telephone_3', '31794561')
dynamic_form = WTFormsDynamicFields()
dynamic_form.add_field('email','Email', TextField)
dynamic_form.add_field('telephone','Telephone', TextField)
dynamic_form.add_validator('email', InputRequired)
dynamic_form.add_validator('telephone', InputRequired)
form = dynamic_form.process(SimpleForm,
post)
assert form.validate() == True
assert form.email_1.data == 'one@mail.mock'
assert form.email_2.data == 'two@mail.mock'
assert form.email_3.data == 'three@mail.mock'
assert form.email_1() == '<input id="email_1" name="email_1" type="text" value="one@mail.mock">'
assert form.email_2() == '<input id="email_2" name="email_2" type="text" value="two@mail.mock">'
assert form.email_3() == '<input id="email_3" name="email_3" type="text" value="three@mail.mock">'
assert form.telephone_1.data == '14564678'
assert form.telephone_2.data == '64578952'
assert form.telephone_3.data == '31794561'
assert form.telephone_1() == '<input id="telephone_1" name="telephone_1" type="text" value="14564678">'
assert form.telephone_2() == '<input id="telephone_2" name="telephone_2" type="text" value="64578952">'
assert form.telephone_3() == '<input id="telephone_3" name="telephone_3" type="text" value="31794561">'
def test_automatic_label_suffix(setup):
""" Test %% replacement with single field
Sets - Error situation.
Fields email_x should not be blank.
Merely inducing an error to assert for correct field name replacement.
"""
post = deepcopy(setup)
post.add(u'email_1', '')
dynamic_form = WTFormsDynamicFields()
dynamic_form.add_field('email','Email', TextField)
dynamic_form.add_validator('email', InputRequired, message='Please fill in %email%.')
form = dynamic_form.process(SimpleForm,
post)
form.validate()
assert form.errors['email_1'] == ['Please fill in email_1.']
assert form.email_1() == '<input id="email_1" name="email_1" type="text" value="">'
def test_dependend_automatic_label_suffix(setup):
""" Test %% replacement with many fields
Sets - Error situation.
Fields email_x and telephone_x should not be blank.
Merely inducing an error to assert for correct field name replacement.
"""
post = deepcopy(setup)
post.add(u'email_1', '')
post.add(u'telephone_1', '')
post.add(u'pager_1', '')
post.add(u'email_2', '')
post.add(u'telephone_2', '')
post.add(u'pager_2', '')
dynamic_form = WTFormsDynamicFields()
dynamic_form.add_field('email','Email', TextField)
dynamic_form.add_validator('email', InputRequired, message='Please fill in %telephone% or %pager%.')
form = dynamic_form.process(SimpleForm,
post)
form.validate()
assert form.errors['email_1'] == ['Please fill in telephone_1 or pager_1.']
assert form.errors['email_2'] == ['Please fill in telephone_2 or pager_2.']
assert form.email_1() == '<input id="email_1" name="email_1" type="text" value="">'
assert form.email_2() == '<input id="email_2" name="email_2" type="text" value="">'
def test_long_field_name_replacement(setup):
""" Test %% replacement with many fields
Sets - Error situation.
See if fields with many underscores and digits still
get picked up correctly by the %field_name% formatter.
Merely inducing an error to assert for correct field name replacement.
"""
post = deepcopy(setup)
post.add(u'a_very_long_10_field_name_1', '')
post.add(u'yet_another_34_long_2_name_10_1', '')
post.add(u'a_very_long_10_field_name_2', '')
post.add(u'yet_another_34_long_2_name_10_2', '')
dynamic_form = WTFormsDynamicFields()
dynamic_form.add_field('a_very_long_10_field_name',
'A very long field name', TextField)
dynamic_form.add_validator('a_very_long_10_field_name',
InputRequired,
message='Please fill in %a_very_long_10_field_name% or %yet_another_34_long_2_name_10%.')
dynamic_form.add_field('yet_another_34_long_2_name_10',
'A very long field name', TextField)
dynamic_form.add_validator('yet_another_34_long_2_name_10',
InputRequired,
message='Please fill in %a_very_long_10_field_name% or %yet_another_34_long_2_name_10%.')
form = dynamic_form.process(SimpleForm,
post)
form.validate()
assert form.validate() == False
assert form.errors['a_very_long_10_field_name_1'] == ['Please fill in a_very_long_10_field_name_1 or yet_another_34_long_2_name_10_1.']
assert form.errors['yet_another_34_long_2_name_10_1'] == ['Please fill in a_very_long_10_field_name_1 or yet_another_34_long_2_name_10_1.']
assert form.errors['a_very_long_10_field_name_2'] == ['Please fill in a_very_long_10_field_name_2 or yet_another_34_long_2_name_10_2.']
assert form.errors['yet_another_34_long_2_name_10_2'] == ['Please fill in a_very_long_10_field_name_2 or yet_another_34_long_2_name_10_2.']
assert form.a_very_long_10_field_name_1() == '<input id="a_very_long_10_field_name_1" name="a_very_long_10_field_name_1" type="text" value="">'
assert form.yet_another_34_long_2_name_10_1() == '<input id="yet_another_34_long_2_name_10_1" name="yet_another_34_long_2_name_10_1" type="text" value="">'
assert form.a_very_long_10_field_name_2() == '<input id="a_very_long_10_field_name_2" name="a_very_long_10_field_name_2" type="text" value="">'
assert form.yet_another_34_long_2_name_10_2() == '<input id="yet_another_34_long_2_name_10_2" name="yet_another_34_long_2_name_10_2" type="text" value="">'
| {
"content_hash": "638481637739f2b889b567db04a848d8",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 159,
"avg_line_length": 44.828828828828826,
"alnum_prop": 0.645096463022508,
"repo_name": "Timusan/wtforms-dynamic-fields",
"id": "94f63392cde55c8874d921f8cffae779648a86a0",
"size": "9952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_wtforms_dynamic_fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "34786"
}
],
"symlink_target": ""
} |
from LSFEventScraper import LSFEventScraper
from LSFEventType import LSFEventType
__author__ = 'pascal'
scraper = LSFEventScraper(event_type=LSFEventType.normal_event, logging=False)
# FIRST SCENARIO
#===============
# - Fetches all events from HTW-Berlin.de and stores them to memory.
scraper.scrape_events()
# - Sends a TRUNCATE command to the database, to delete all current rows.
scraper.db_access.reset()
# - Sends saves all events to the database.
scraper.save_events_to_db()
# SECOND SCENARIO
#================
# - Fetches all day-overviews and stores them as html files to ./data_events/
# scraper.crawl_day_pages_and_save_to_disk()
# - ...Later... After you've fetched the pages, you can scrape and store the events later.
# - Scrapes all local sites and stores them to memory
#scraper.scrape_local_sites()
# - Sends a TRUNCATE command to the database, to delete all current rows.
#scraper.db_access.reset()
# - Sends saves all events to the database.
#scraper.save_events_to_db()
| {
"content_hash": "94294a1cf89fcf4727420185222c6511",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 90,
"avg_line_length": 29.441176470588236,
"alnum_prop": 0.7262737262737263,
"repo_name": "pascalweiss/LSFEventScraper",
"id": "05964731b61ffa7d7e34dc6ffe230308d9253f0c",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23309"
}
],
"symlink_target": ""
} |
import yaml
from infcommon.info_container.info_container import InfoContainer
class YamlReaderNotValidFileError(BaseException):
pass
class YamlReader(object):
def __init__(self, path):
self._path = path
def get(self, key):
return self._load_file().get(key)
def get_info_container(self):
return InfoContainer(self._load_file(), return_none=True)
def get_key_by(self, value):
for key, value_ in self._load_file().iteritems():
if not isinstance(value_, bool) and value in value_:
return key
def get_all(self):
return self._load_file()
def __getitem__(self, key):
return self._load_file()[key]
def _load_file(self):
with open(self._path) as f:
try:
content = yaml.load(f)
return content
except yaml.error.MarkedYAMLError as exc:
raise YamlReaderNotValidFileError(str(exc))
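# Illustrative usage (not part of the original module): the path and key
# below are assumptions for demonstration only.
#
#   reader = YamlReader('/etc/app/config.yml')
#   reader.get('database')       # value for 'database', or None if absent
#   reader['database']           # same lookup, KeyError if absent
#   info = reader.get_info_container()  # attribute-style access wrapper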
| {
"content_hash": "772fc3c6e13dfc5a24da53d079385b97",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 65,
"avg_line_length": 25.5,
"alnum_prop": 0.5985552115583075,
"repo_name": "aleasoluciones/infcommon",
"id": "c142dd7643976b1974a4fb2f03c7dd7d2c6bf9b9",
"size": "994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infcommon/yaml_reader/yaml_reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63321"
},
{
"name": "Shell",
"bytes": "1878"
}
],
"symlink_target": ""
} |
import click
@click.group()
def cli():
pass
@click.command()
def create():
print "create command!"
def main():
cli.add_command(create)
cli()
if __name__ == '__main__':
main() | {
"content_hash": "12e5b951eb4f40292319cfba92ab323d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 27,
"avg_line_length": 11.941176470588236,
"alnum_prop": 0.5566502463054187,
"repo_name": "bdastur/utils",
"id": "39af5edd040a0bf8359a737633d3d99c92d29998",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "builder/builder/builder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92370"
},
{
"name": "Go",
"bytes": "9499"
},
{
"name": "HTML",
"bytes": "1039804"
},
{
"name": "JavaScript",
"bytes": "3071606"
},
{
"name": "Makefile",
"bytes": "108"
},
{
"name": "Python",
"bytes": "107160"
},
{
"name": "Shell",
"bytes": "13015"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from annotations.models import Language, Tense, Annotation, Fragment, LabelKey, Label
from selections.models import Selection
class Command(BaseCommand):
help = 'Imports Tenses/Labels for Annotations, Selections, and Fragments'
def add_arguments(self, parser):
parser.add_argument('language', type=str)
parser.add_argument('filenames', nargs='+', type=str)
parser.add_argument('--model', action='store', dest='model', default='annotation')
def handle(self, *args, **options):
try:
language = Language.objects.get(iso=options['language'])
except Language.DoesNotExist:
raise CommandError('Language {} does not exist'.format(options['language']))
for filename in options['filenames']:
with open(filename, 'r') as csvfile:
try:
process_file(csvfile, language, options['model'])
self.stdout.write('Successfully imported labels')
except ValueError as e:
raise CommandError(e.message)
def process_file(f, language, model='annotation'):
f = iter(f)
header = next(f)
if isinstance(header, bytes):
header = header.decode()
header = header.strip().split('\t')
columns = []
for h in header[1:]:
if isinstance(h, bytes):
h = h.decode()
if h.lower() in ['tense', 'comments']:
columns.append(h)
else:
try:
key = LabelKey.objects.get(title__iexact=h)
columns.append(key)
except LabelKey.DoesNotExist:
raise ValueError('Unknown label "{}"'.format(h))
for row in f:
row = row.decode().strip()
if row:
encoded = row.split('\t')
if model == 'annotation':
obj = get_annotation(encoded)
elif model == 'selection':
obj = get_selection(encoded)
elif model == 'fragment':
obj = get_fragment(encoded)
else:
raise ValueError('Unknown model {}'.format(model))
update_fields(obj, language, encoded, columns)
def update_fields(obj, language, row, columns):
for idx, column in enumerate(columns):
if idx + 1 >= len(row):
continue
cell = row[idx + 1]
if column == 'tense':
try:
obj.tense = Tense.objects.get(title__iexact=cell, language=language)
except Tense.DoesNotExist:
raise ValueError('Tense with title "{}" not found.'.format(row[1]))
elif column == 'comments':
if isinstance(obj, (Annotation, Selection)):
obj.comments = cell
else:
raise ValueError('Cannot add comments to Fragment')
elif isinstance(column, LabelKey):
if column.language_specific:
label, created = Label.objects.get_or_create(title=cell, key=column, language=language)
else:
label, created = Label.objects.get_or_create(title=cell, key=column)
if created:
label.save()
for existing in obj.labels.filter(key=column):
obj.labels.remove(existing)
obj.labels.add(label)
obj.save()
def get_annotation(row):
try:
return Annotation.objects.get(pk=row[0])
except Annotation.DoesNotExist:
raise ValueError('Annotation with pk {} not found.'.format(row[0]))
def get_selection(row):
try:
return Selection.objects.get(pk=row[0])
except Selection.DoesNotExist:
raise ValueError('Selection with pk {} not found.'.format(row[0]))
def get_fragment(row):
try:
return Fragment.objects.get(pk=row[0])
except Fragment.DoesNotExist:
raise ValueError('Fragment with pk {} not found.'.format(row[0]))
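# Illustrative input format (an assumption inferred from process_file, not
# part of the original module): a tab-separated file whose first column holds
# the object's pk and whose remaining headers name 'tense', 'comments' or a
# LabelKey title, e.g.
#
#   pk<TAB>tense<TAB>comments
#   1<TAB>present perfect<TAB>ambiguous reading
#
# Invocation sketch:
#   python manage.py import_tenses en labels.tsv --model=annotation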
| {
"content_hash": "9b3f15b847ddcb66327b371c11ab72e3",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 103,
"avg_line_length": 33.70338983050848,
"alnum_prop": 0.5803369373899925,
"repo_name": "UUDigitalHumanitieslab/timealign",
"id": "4854b39b239c32252ea60df34550e65bc82cc17d",
"size": "4002",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "annotations/management/commands/import_tenses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4412"
},
{
"name": "HTML",
"bytes": "156742"
},
{
"name": "JavaScript",
"bytes": "17478"
},
{
"name": "Python",
"bytes": "387902"
},
{
"name": "XSLT",
"bytes": "2171"
}
],
"symlink_target": ""
} |
import numpy as np
import re
import itertools
from collections import Counter
from utility import build_vocab, clean_str
def load_data_and_labels(pos, neg):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open(pos).readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(neg).readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
#x_text = [clean_str(sent) for sent in x_text]
x_text = [sent for sent in x_text]
x_text = [s.split(" ") for s in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
def pad_sentences(sentences, padding_word="<PAD/>"):
"""
Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
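# Illustrative example (not part of the original module): every sentence is
# right-padded up to the longest one.
#
#   >>> pad_sentences([['a', 'b'], ['c']])
#   [['a', 'b'], ['c', '<PAD/>']]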
def build_input_data(sentences, labels, word2index):
"""
Maps sentencs and labels to vectors based on a vocabulary.
"""
x = np.array([[word2index[word] for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y]
def mapping_input(sentences, word2index):
x = np.array([[word2index[word] for word in sentence] for sentence in sentences], dtype='int32')
return x
def hasA_butB(sentences, padding_word="<PAD/>"):
sentences_S = []
sentences_B = []
for sentence in sentences:
if not 'but' in sentence: continue
i = sentence.index('but')
sentences_S.append(sentence[:])
sentences_B.append([padding_word] * (i + 1) + sentence[i+1:])
return sentences_S, sentences_B
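# Example (illustrative): for ["good", "plot", "but", "dull", "acting"],
# hasA_butB keeps the full sentence in sentences_S and masks everything up to
# and including "but" in sentences_B:
#   ["<PAD/>", "<PAD/>", "<PAD/>", "dull", "acting"]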
def load_data(pos, neg):
"""
Loads and preprocessed data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preprocess data
    sentences, labels = load_data_and_labels(pos, neg)
sentences_padded = pad_sentences(sentences)
vocabulary, word2index = build_vocab(sentences_padded)
x, y = build_input_data(sentences_padded, labels, word2index)
return [x, y, vocabulary, word2index]
if __name__ == '__main__':
    import sys
    # expects the paths of the positive and negative example files
    load_data(sys.argv[1], sys.argv[2])
| {
"content_hash": "46e1b18ff07802db2aab2b43e55c6541",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 100,
"avg_line_length": 35.64102564102564,
"alnum_prop": 0.6633093525179856,
"repo_name": "3rduncle/knowledgeflow",
"id": "0df3b08ac6c97318a893040d27227bd3d63fabef",
"size": "2780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "knowledgeflow/utility/mr_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "12514"
},
{
"name": "Python",
"bytes": "90259"
}
],
"symlink_target": ""
} |
"""This file runs subsampled value iteration using metrics.
It estimates the value function for a set of points using a pre-defined state
metric to obtain the approximant from the nearest known neighbour.
This module will run a number of trials on a set of possible metrics and compile
the results in a plot.
"""
import os
import gin
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow.compat.v1 as tf
DEFAULT_SUBSAMPLING_FRACTIONS = (0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,
0.8, 0.9, 1.0)
@gin.configurable
def experiment(base_dir, env, metrics,
subsampling_fractions=DEFAULT_SUBSAMPLING_FRACTIONS, run=0,
random_mdp=True, verbose=False, aggregation_method=None):
"""Module to run the experiment.
Args:
base_dir: str, base directory where to save the files.
env: an environment specifying the true underlying MDP.
metrics: list of metrics which will be used for the nearest-neighbour
approximants.
subsampling_fractions: list of floats specifying the number of states to
subsample for the approximant.
run: int, run id.
random_mdp: bool, whether the environment is a random MDP or not.
verbose: bool, whether to print verbose messages.
aggregation_method: str, dummy variable
Returns:
Dict containing statistics.
"""
del base_dir
del aggregation_method
del random_mdp
if env.values is None:
tf.logging.info('Values must have already been computed.')
return
data = {
'Metric': [],
'num_known_states': [],
'run': [],
'avg_error': [],
'max_error': [],
'avg_error_q': [],
'max_error_q': []
}
for subsample_fraction in subsampling_fractions:
for metric in metrics:
if metric.metric is None:
continue
if verbose:
tf.logging.info('***Run {}, {}, {}'.format(
subsample_fraction, metric.name, run))
num_subsampled_states = int(env.num_states * subsample_fraction)
num_known_states = env.num_states - num_subsampled_states
subsamples = np.random.choice(env.num_states,
size=num_subsampled_states, replace=False)
subsampled_metric = np.copy(metric.metric)
# We first set all distances between subsampled states to np.inf.
for s1 in subsamples:
for s2 in subsamples:
subsampled_metric[s1, s2] = np.inf
max_error = 0.
avg_error = 0.
max_error_q = 0.
avg_error_q = 0.
for subsample in subsamples:
nearest_neighbor = np.argmin(subsampled_metric[subsample])
value_estimate_error = abs(
env.values[nearest_neighbor] - env.values[subsample])
q_value_estimate_error = np.max(abs(
env.q_values[nearest_neighbor] - env.q_values[subsample]))
if value_estimate_error > max_error:
max_error = value_estimate_error
if q_value_estimate_error > max_error_q:
max_error_q = q_value_estimate_error
avg_error += value_estimate_error
avg_error_q += q_value_estimate_error
avg_error /= env.num_states
avg_error_q /= env.num_states
data['Metric'].append(metric.label)
data['num_known_states'].append(num_known_states)
data['run'].append(run)
data['max_error'].append(max_error)
data['avg_error'].append(avg_error)
data['max_error_q'].append(max_error_q)
data['avg_error_q'].append(avg_error_q)
return data
def plot_data(base_dir, data):
"""Plot the data collected from all experiment runs."""
df = pd.DataFrame(data=data)
for error in ['max_error_q', 'avg_error_q']:
plt.subplots(1, 1, figsize=(8, 6))
sns.lineplot(x='num_known_states', y=error, hue='Metric', data=df,
ci=99, lw=3)
plt.xlabel('Number of known states', fontsize=24)
    ylabel = r'$L_{\infty}$ Error' if error == 'max_error_q' else 'Avg. Error'
plt.ylabel(ylabel, fontsize=24)
plt.legend(fontsize=18)
pdf_file = os.path.join(base_dir,
'subsampled_value_iteration_{}.pdf'.format(error))
with tf.gfile.GFile(pdf_file, 'w') as f:
plt.savefig(f, format='pdf', dpi=300, bbox_inches='tight')
plt.clf()
plt.close('all')
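# Hypothetical driver (not part of this module) showing how experiment() and
# plot_data() are meant to fit together across runs; `num_runs`, `env` and
# `metrics` are assumed to be provided elsewhere:
#
#   import collections
#   all_data = collections.defaultdict(list)
#   for run in range(num_runs):
#     run_data = experiment(base_dir, env, metrics, run=run)
#     for key, values in run_data.items():
#       all_data[key].extend(values)
#   plot_data(base_dir, all_data)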
| {
"content_hash": "4c72fc6403a16d112bb9e6f7d6b01a7f",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 80,
"avg_line_length": 36.516949152542374,
"alnum_prop": 0.6368066836853098,
"repo_name": "google-research/google-research",
"id": "9dc1624f342bb97bb78b245c2523088996d3c2ea",
"size": "4917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rl_metrics_aaai2021/subsampled_value_iteration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
__author__ = 'paul'
from google.appengine.ext import ndb
class QuoteGroup(ndb.Model):
quote_count = ndb.IntegerProperty()
creation_timestamp = ndb.DateTimeProperty(auto_now_add=True)
    last_modified_timestamp = ndb.DateTimeProperty(auto_now=True)
| {
"content_hash": "ff0e052253f4869c53c1b81ef7f0bd39",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 65,
"avg_line_length": 32.25,
"alnum_prop": 0.748062015503876,
"repo_name": "marshalllaw18/quotechu",
"id": "61bf037eb0af0e4739d193943e9e4b4483467ac9",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quotechu-server/app/QuoteGroup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10569"
}
],
"symlink_target": ""
} |
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
templates_path = ['_templates']
master_doc = 'index'
source_suffix = '.txt'
project = u'OmMongo'
copyright = u'2017, Bapakode Open Source'
version = '1.0'
release = '1.0'
exclude_patterns = ['build', '_static', '_templates']
pygments_style = 'sphinx'
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'inherited-members', 'undoc-members']
# -- Options for HTML output ---------------------------------------------------
html_static_path = ['static']
htmlhelp_basename = 'OmMongo'
html_theme = 'default'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [('index', 'OmMongo.tex', u'OmMongo Documentation',u'Bapakode Open Source', 'manual')]
# -- Options for manual page output --------------------------------------------
man_pages = [('index', 'OmMongo', u'OmMongo Documentation',[u'OmMongo Open Source'], 1)]
bad_names = ['schema_json', 'has_autoload', 'set_parent_on_subtypes',
'subfields', 'has_subfields', 'valid_modifiers',
'set_value', 'update_ops', 'db_field', 'no_real_attributes',
'default', 'localize', 'auto', 'dirty_ops', 'validate_wrap',
'is_valid_unwrap', 'is_valid_wrap', 'unwrap',
'validate_unwrap', 'wrap', 'wrap_value', 'PrimitiveField',
'NumberField', 'SequenceField', 'child_type',
'type', 'sub_type', 'compute_value', 'in_transaction',
'autoflush', 'cache_write', 'cache_read', 'transaction_id',
'execute_update', 'execute_remove', 'execute_find_and_modify']
bad_exc = ['message', 'args']
def skip_common(app, what, name, obj, skip, options):
    # print(what, name, obj)  # noisy debug output, disabled
if what == 'exception' and name in bad_exc:
return True
if name in bad_names:
return True
return skip
def setup(app):
app.connect('autodoc-skip-member', skip_common)
| {
"content_hash": "e521539c4c3f43c63c7df54ad73573fd",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 116,
"avg_line_length": 41.54716981132076,
"alnum_prop": 0.5408719346049047,
"repo_name": "bapakode/OmMongo",
"id": "3b97707c84dfa6ad4ef71884652195e7bef599ac",
"size": "2347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "270963"
}
],
"symlink_target": ""
} |
"""The volumes extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.objects import block_device as block_device_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import uuidutils
from nova import volume
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'volumes')
authorize_attach = extensions.extension_authorizer('compute',
'volume_attachments')
def _translate_volume_detail_view(context, vol):
"""Maps keys for volumes details view."""
d = _translate_volume_summary_view(context, vol)
# No additional data / lookups at the moment
return d
def _translate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availabilityZone'] = vol['availability_zone']
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
vol['instance_uuid'],
vol['mountpoint'])]
else:
d['attachments'] = [{}]
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volumeType'] = vol['volume_type']['name']
else:
d['volumeType'] = vol['volume_type_id']
d['snapshotId'] = vol['snapshot_id']
LOG.audit(_("vol=%s"), vol, context=context)
if vol.get('volume_metadata'):
d['metadata'] = vol.get('volume_metadata')
else:
d['metadata'] = {}
return d
def make_volume(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('availabilityZone')
elem.set('createdAt')
elem.set('displayName')
elem.set('displayDescription')
elem.set('volumeType')
elem.set('snapshotId')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
selector='attachments')
make_attachment(attachment)
# Attach metadata node
elem.append(common.MetadataTemplate())
class VolumeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
return xmlutil.MasterTemplate(root, 1)
class VolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
return xmlutil.MasterTemplate(root, 1)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
vol = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['display_name', 'display_description', 'size',
'volume_type', 'availability_zone']
for attr in attributes:
if volume_node.getAttribute(attr):
vol[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
vol['metadata'] = self.extract_metadata(metadata_node)
return vol
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted create volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = xmlutil.safe_minidom_parse_string(string)
vol = self._extract_volume(dom)
return {'body': {'volume': vol}}
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(VolumeController, self).__init__()
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return {'volume': _translate_volume_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete volume with id: %s"), id, context=context)
try:
self.volume_api.delete(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
def index(self, req):
"""Returns a summary list of volumes."""
return self._items(req, entity_maker=_translate_volume_summary_view)
@wsgi.serializers(xml=VolumesTemplate)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._items(req, entity_maker=_translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
volumes = self.volume_api.get_all(context)
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
@wsgi.serializers(xml=VolumeTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new volume."""
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'volume'):
raise exc.HTTPUnprocessableEntity()
vol = body['volume']
vol_type = vol.get('volume_type', None)
metadata = vol.get('metadata', None)
snapshot_id = vol.get('snapshot_id')
if snapshot_id is not None:
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
else:
snapshot = None
size = vol.get('size', None)
if size is None and snapshot is not None:
size = snapshot['volume_size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
availability_zone = vol.get('availability_zone', None)
try:
new_volume = self.volume_api.create(
context,
size,
vol.get('display_name'),
vol.get('display_description'),
snapshot=snapshot,
volume_type=vol_type,
metadata=metadata,
availability_zone=availability_zone
)
except exception.InvalidInput as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = _translate_volume_detail_view(context, dict(new_volume))
result = {'volume': retval}
location = '%s/%s' % (req.url, new_volume['id'])
return wsgi.ResponseObject(result, headers=dict(location=location))
def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment details view."""
d = _translate_attachment_summary_view(volume_id,
instance_uuid,
mountpoint)
# No additional data / lookups at the moment
return d
def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment summary view."""
d = {}
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volumeId'] = volume_id
d['serverId'] = instance_uuid
if mountpoint:
d['device'] = mountpoint
return d
def make_attachment(elem):
elem.set('id')
elem.set('serverId')
elem.set('volumeId')
elem.set('device')
class VolumeAttachmentTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumeAttachment',
selector='volumeAttachment')
make_attachment(root)
return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumeAttachments')
elem = xmlutil.SubTemplateElement(root, 'volumeAttachment',
selector='volumeAttachments')
make_attachment(elem)
return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentController(wsgi.Controller):
"""The volume attachment API controller for the OpenStack API.
A child resource of the server. Note that we use the volume id
as the ID of the attachment (though this is not guaranteed externally)
"""
def __init__(self, ext_mgr=None):
self.compute_api = compute.API()
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(VolumeAttachmentController, self).__init__()
@wsgi.serializers(xml=VolumeAttachmentsTemplate)
def index(self, req, server_id):
"""Returns the list of volume attachments for a given instance."""
context = req.environ['nova.context']
authorize_attach(context, action='index')
return self._items(req, server_id,
entity_maker=_translate_attachment_summary_view)
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def show(self, req, server_id, id):
"""Return data about the given volume attachment."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='show')
volume_id = id
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
if not bdms:
LOG.debug("Instance %s is not attached.", server_id)
raise exc.HTTPNotFound()
assigned_mountpoint = None
for bdm in bdms:
if bdm.volume_id == volume_id:
assigned_mountpoint = bdm.device_name
break
if assigned_mountpoint is None:
LOG.debug("volume_id not found")
raise exc.HTTPNotFound()
return {'volumeAttachment': _translate_attachment_detail_view(
volume_id,
instance['uuid'],
assigned_mountpoint)}
def _validate_volume_id(self, volume_id):
if not uuidutils.is_uuid_like(volume_id):
msg = _("Bad volumeId format: volumeId is "
"not in proper format (%s)") % volume_id
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def create(self, req, server_id, body):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='create')
if not self.is_valid_body(body, 'volumeAttachment'):
raise exc.HTTPUnprocessableEntity()
try:
volume_id = body['volumeAttachment']['volumeId']
except KeyError:
msg = _("volumeId must be specified.")
raise exc.HTTPBadRequest(explanation=msg)
device = body['volumeAttachment'].get('device')
self._validate_volume_id(volume_id)
LOG.audit(_("Attach volume %(volume_id)s to instance %(server_id)s "
"at %(device)s"),
{'volume_id': volume_id,
'device': device,
'server_id': server_id},
context=context)
try:
instance = self.compute_api.get(context, server_id,
want_objects=True)
device = self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.NotFound:
raise exc.HTTPNotFound()
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'attach_volume')
# The attach is async
attachment = {}
attachment['id'] = volume_id
attachment['serverId'] = server_id
attachment['volumeId'] = volume_id
attachment['device'] = device
# NOTE(justinsb): And now, we have a problem...
# The attach is async, so there's a window in which we don't see
# the attachment (until the attachment completes). We could also
# get problems with concurrent requests. I think we need an
# attachment state, and to write to the DB here, but that's a bigger
# change.
# For now, we'll probably have to rely on libraries being smart
# TODO(justinsb): How do I return "accepted" here?
return {'volumeAttachment': attachment}
def update(self, req, server_id, id, body):
if (not self.ext_mgr or
not self.ext_mgr.is_loaded('os-volume-attachment-update')):
raise exc.HTTPBadRequest()
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='update')
if not self.is_valid_body(body, 'volumeAttachment'):
raise exc.HTTPUnprocessableEntity()
old_volume_id = id
old_volume = self.volume_api.get(context, old_volume_id)
try:
new_volume_id = body['volumeAttachment']['volumeId']
except KeyError:
msg = _("volumeId must be specified.")
raise exc.HTTPBadRequest(explanation=msg)
self._validate_volume_id(new_volume_id)
new_volume = self.volume_api.get(context, new_volume_id)
try:
instance = self.compute_api.get(context, server_id,
want_objects=True)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
found = False
try:
for bdm in bdms:
if bdm.volume_id != old_volume_id:
continue
try:
self.compute_api.swap_volume(context, instance, old_volume,
new_volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'swap_volume')
if not found:
raise exc.HTTPNotFound()
else:
return webob.Response(status_int=202)
def delete(self, req, server_id, id):
"""Detach a volume from an instance."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='delete')
volume_id = id
LOG.audit(_("Detach volume %s"), volume_id, context=context)
try:
instance = self.compute_api.get(context, server_id,
want_objects=True)
except exception.NotFound:
raise exc.HTTPNotFound()
volume = self.volume_api.get(context, volume_id)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
if not bdms:
LOG.debug("Instance %s is not attached.", server_id)
raise exc.HTTPNotFound()
found = False
try:
for bdm in bdms:
if bdm.volume_id != volume_id:
continue
if bdm.is_root:
msg = _("Can't detach root device volume")
raise exc.HTTPForbidden(explanation=msg)
try:
self.compute_api.detach_volume(context, instance, volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'detach_volume')
if not found:
raise exc.HTTPNotFound()
else:
return webob.Response(status_int=202)
def _items(self, req, server_id, entity_maker):
"""Returns a list of attachments, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
try:
instance = self.compute_api.get(context, server_id)
except exception.NotFound:
raise exc.HTTPNotFound()
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
limited_list = common.limited(bdms, req)
results = []
for bdm in limited_list:
if bdm.volume_id:
results.append(entity_maker(bdm.volume_id,
bdm.instance_uuid,
bdm.device_name))
return {'volumeAttachments': results}
def _translate_snapshot_detail_view(context, vol):
"""Maps keys for snapshots details view."""
d = _translate_snapshot_summary_view(context, vol)
# NOTE(gagupta): No additional data / lookups at the moment
return d
def _translate_snapshot_summary_view(context, vol):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = vol['id']
d['volumeId'] = vol['volume_id']
d['status'] = vol['status']
# NOTE(gagupta): We map volume_size as the snapshot size
d['size'] = vol['volume_size']
d['createdAt'] = vol['created_at']
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
return d
def make_snapshot(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('createdAt')
elem.set('displayName')
elem.set('displayDescription')
elem.set('volumeId')
class SnapshotTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshot', selector='snapshot')
make_snapshot(root)
return xmlutil.MasterTemplate(root, 1)
class SnapshotsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshots')
elem = xmlutil.SubTemplateElement(root, 'snapshot',
selector='snapshots')
make_snapshot(elem)
return xmlutil.MasterTemplate(root, 1)
class SnapshotController(wsgi.Controller):
"""The Snapshots API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(SnapshotController, self).__init__()
@wsgi.serializers(xml=SnapshotTemplate)
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get_snapshot(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return {'snapshot': _translate_snapshot_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
try:
self.volume_api.delete_snapshot(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=SnapshotsTemplate)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_summary_view)
@wsgi.serializers(xml=SnapshotsTemplate)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of snapshots, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
snapshots = self.volume_api.get_all_snapshots(context)
limited_list = common.limited(snapshots, req)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
@wsgi.serializers(xml=SnapshotTemplate)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'snapshot'):
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
volume_id = snapshot['volume_id']
LOG.audit(_("Create snapshot from volume %s"), volume_id,
context=context)
force = snapshot.get('force', False)
try:
force = strutils.bool_from_string(force, strict=True)
except ValueError:
msg = _("Invalid value '%s' for force.") % force
raise exception.InvalidParameterValue(err=msg)
if force:
create_func = self.volume_api.create_snapshot_force
else:
create_func = self.volume_api.create_snapshot
new_snapshot = create_func(context, volume_id,
snapshot.get('display_name'),
snapshot.get('display_description'))
retval = _translate_snapshot_detail_view(context, new_snapshot)
return {'snapshot': retval}
class Volumes(extensions.ExtensionDescriptor):
"""Volumes support."""
name = "Volumes"
alias = "os-volumes"
namespace = "http://docs.openstack.org/compute/ext/volumes/api/v1.1"
updated = "2011-03-25T00:00:00Z"
def get_resources(self):
resources = []
# NOTE(justinsb): No way to provide singular name ('volume')
# Does this matter?
res = extensions.ResourceExtension('os-volumes',
VolumeController(),
collection_actions={'detail': 'GET'})
resources.append(res)
attachment_controller = VolumeAttachmentController(self.ext_mgr)
res = extensions.ResourceExtension('os-volume_attachments',
attachment_controller,
parent=dict(
member_name='server',
collection_name='servers'))
resources.append(res)
res = extensions.ResourceExtension('os-volumes_boot',
inherits='servers')
resources.append(res)
res = extensions.ResourceExtension('os-snapshots',
SnapshotController(),
collection_actions={'detail': 'GET'})
resources.append(res)
return resources
| {
"content_hash": "ace703985d8d98f35b02b0db9977ab7d",
"timestamp": "",
"source": "github",
"line_count": 730,
"max_line_length": 79,
"avg_line_length": 34.321917808219176,
"alnum_prop": 0.5944522051486729,
"repo_name": "CiscoSystems/nova",
"id": "757bd76c0e86150c79277669b11674ec1039dae8",
"size": "25691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/contrib/volumes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13926229"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
} |
from sentry.web.urls import *
| {
"content_hash": "a16e6a14e9e9236c631d4b9597bcdadc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.7666666666666667,
"repo_name": "chroto/sentry-xmpp",
"id": "94a930744be1fc7516066ec0a94178cb25ac2825",
"size": "30",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentry_xmpp/tests/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7738"
}
],
"symlink_target": ""
} |
import argparse
import arrow
import cmd
import datetime
import json
import math
import os
import re
import shlex
import sys
from tabulate import tabulate
import textwrap
import time
from talus_client.cmds import TalusCmdBase
import talus_client.api
import talus_client.errors as errors
import talus_client.utils as utils
from talus_client.models import *
from talus_client.param_model import ModelCmd
class JobCmd(TalusCmdBase):
"""The Talus job command processor
"""
command_name = "job"
def do_info(self, args):
"""List detailed information about the job.
Git-like syntax can also be used here to refer to the most recently created
job. E.g. the command below will show info about the 2nd most recent job:
job info +2
Search information can also be used. If git-like syntax is omitted, only
the first entry returned from the database will be displayed.
job info --all --status running --sort -priority +2
The example above will show information about 2nd highest priority job whose
status is running. Omitting --all will cause the search to be performed only
among _your_ jobs.
"""
if args.strip() == "":
raise errors.TalusApiError("you must provide a name/id of a job to show info about it")
parts = shlex.split(args)
all_mine = False
# this is the default, so just remove this flag and ignore it
if "--all-mine" in parts:
parts.remove("--all-mine")
leftover = []
job_id_or_name = None
search = self._search_terms(parts, out_leftover=leftover)
if len(leftover) > 0:
job_id_or_name = leftover[0]
job = self._resolve_one_model(job_id_or_name, Job, search)
if job is None:
raise errors.TalusApiError("could not find talus job with id {!r}".format(job_id_or_name))
job_errors = "None"
job_logs = "None"
# created value
cv = None
created = ""
if "created" in job.timestamps:
cv = job.timestamps["created"]
created = "{} ({})".format(self._rel_date(cv), self._actual_date(cv))
stopped = ""
# stopped value
sv = None
if job.status["name"] == "cancelled" and "cancelled" in job.timestamps:
sv = job.timestamps["cancelled"]
elif job.status["name"] == "finished" and "finished" in job.timestamps:
sv = job.timestamps["finished"]
if sv is not None:
stopped = "{} ({})".format(self._rel_date(sv), self._actual_date(sv))
# in limits/s
if sv is None:
sv = time.time()
speed = ""
if cv is not None:
# in limits/sec
speed_val = job.progress / (sv - cv)
limits_sec = round(speed_val, 2)
limits_min = round(speed_val * 60, 2)
limits_hour = round(speed_val * 60*60, 2)
limits_day = round(speed_val * 60*60*24, 2)
speed = "{}/s, {}/min, {}/hour, {}/day".format(
limits_sec,
limits_min,
limits_hour,
limits_day
)
if len(job.errors) > 0:
job_errors = "\n\n"
for idx,error in enumerate(job.errors):
job_errors += "\n".join([
"ERROR {}".format(idx),
"---------",
"\n".join(" {}".format(x) for x in error["logs"]),
error["message"],
error["backtrace"],
"\n\n",
])
job_errors = "\n".join([" {}".format(x) for x in job_errors.split("\n")])
if len(job.logs) > 0:
job_logs = "\n\n"
for idx,log in enumerate(job.logs):
job_logs += "\n".join([
"LOG {}".format(idx),
"---------",
log["message"],
"\n".join(" {}".format(x) for x in log["logs"]),
"\n\n",
])
job_logs = "\n".join([" {}".format(x) for x in job_logs.split("\n")])
print("""
ID: {id}
Name: {name}
Status: {status}
Tags: {tags}
Started: {started}
Ended: {ended}
Debug: {debug}
Speed: {speed}
Progress: {percent}% ({progress} / {limit})
Task: {task}
Priority: {priority}
Params: {params}
Network: {network}
Image: {image}
VM Max: {vm_max}
Running VMS: {running_vms}
Errors: {job_errors}
Logs: {job_logs}
""".format(
id = job.id,
name = job.name,
status = job.status["name"],
tags = job.tags,
started = created,
ended = stopped,
debug = job.debug,
speed = speed,
percent = round(100 * job.progress / float(job.limit), 1),
progress = job.progress,
limit = job.limit,
task = self._nice_name(job, "task"),
priority = job.priority,
params = json.dumps(job.params),
network = job.network,
image = self._nice_name(job, "image"),
vm_max = job.vm_max,
running_vms = self._get_running_vms(job),
job_errors = job_errors,
job_logs = job_logs,
))
def _get_running_vms(self, job):
vm_headers = ["slave", "vnc port", "running since", "job idx", "status"]
vm_vals = []
for slave in self._talus_client.slave_iter():
for vm in slave.vms:
if vm["job"] == job.id:
vm_vals.append([
slave.hostname,
vm["vnc_port"],
arrow.get(vm["start_time"]).humanize(),
vm["idx"],
vm["vm_status"]
])
if len(vm_vals) == 0:
return "None"
split = int(math.ceil(len(vm_vals)/2.0))
column1 = vm_vals[:split]
column2 = vm_vals[split:]
table1 = tabulate(column1, headers=vm_headers).split("\n")
table2 = tabulate(column2, headers=vm_headers).split("\n")
longest_t1 = max(len(x) for x in table1)
if len(table2) == 0:
lines = table1
else:
lines = []
for x in xrange(len(table1)):
if x >= len(table2):
lines.append(table1[x])
else:
fmt_string = "{:" + str(longest_t1) + "} | {}"
lines.append(fmt_string.format(table1[x], table2[x]))
return "\n\n" + "\n".join(lines) + "\n"
def do_list(self, args):
"""List jobs in Talus.
job list --search-term value --search-term2 value
By default only running jobs that belong to you are shown. To show
all of your jobs, add --all-mine:
job list --search-term value --all-mine
To show all jobs, use `--all`:
job list --all
Dot notation can be used on subdocuments. The example below is
the verbose form of `--status cancelled`:
job list --status.name cancelled
Sorting, skipping, and number of results can also be set using
`--sort field`, `--skip num`, and `--num num` respectively. A sort
value preceded by a negative sign reverses the sorting order:
job list --status finished --sort -timestamps.finished --skip 10 --num 5
MongoDB operators are allowed (don't forget to escape the $). See
https://docs.mongodb.org/manual/reference/operator/query/:
job list --limit.\\$gt 10
job list --name.\\$regex ".*test.*"
            job list --\\$where "(this.progress / this.limit) > 0.5"
MongoEngine operators are allowed as well. See
http://docs.mongoengine.org/guide/querying.html#query-operators:
job list --name__startswith "test"
job list --limit__gt 10
"""
parts = shlex.split(args)
all_mine = False
if "--all-mine" in parts:
parts.remove("--all-mine")
all_mine = True
search = self._search_terms(parts)
if not all_mine and "status.name" not in search and "--all" not in parts:
self.out("use --all-mine to view all of your jobs (not just unfinished)")
search["status.name"] = "running"
if not all_mine and "--all" not in parts and "num" not in search:
self.out("showing first 20 results")
search["num"] = 20
if "sort" not in search:
search["sort"] = "-timestamps.created"
headers = ["id", "name", "status", "priority", "progress", "image", "task", "tags"]
fields = []
for job in self._talus_client.job_iter(**search):
status = job.status["name"]
if len(job.errors) > 0:
status += " *E"
fields.append([
str(job.id),
job.name,
status,
job.priority,
"{:0.2f}% ({}/{})".format(
job.progress / float(job.limit) * 100,
job.progress,
job.limit
),
self._nice_name(job, "image"),
self._nice_name(job, "task"),
job.tags
])
print(tabulate(fields, headers=headers))
def complete_cancel(self, text, line, bg_idx, end_idx):
"""Do completion for the cancel command
"""
total = []
for job in self._talus_client.job_iter():
total.append(job.name)
total.append(job.id)
matching = filter(lambda x: x.startswith(text), total)
return matching
def do_cancel(self, args):
"""Cancel the job by name or ID in talus
job cancel JOB_NAME_OR_ID
"""
if args.strip() == "":
raise errors.TalusApiError("you must provide a name/id of a job to show info about it")
parts = shlex.split(args)
leftover = []
job_id_or_name = None
search = self._search_terms(parts, out_leftover=leftover)
if len(leftover) > 0:
job_id_or_name = leftover[0]
job = self._resolve_one_model(job_id_or_name, Job, search)
if job is None:
raise errors.TalusApiError("no jobs matched id/search criteria")
self._talus_client.job_cancel(job_id_or_name, job=job)
self.ok("cancelled job {}".format(job.id))
def do_clone(self, args):
"""Create a new job that is an exact duplicate of a previously created
job. Note that +1 and other search parameters can be used to identify the
job to clone.
"""
if args.strip() == "":
raise errors.TalusApiError("you must provide a name/id of a job to show info about it")
parts = shlex.split(args)
leftover = []
job_id_or_name = None
search = self._search_terms(parts, out_leftover=leftover)
if len(leftover) > 0:
job_id_or_name = leftover[0]
job = self._resolve_one_model(job_id_or_name, Job, search)
if job is None:
raise errors.TalusApiError("could not find talus job with id {!r}".format(job_id_or_name))
old_id = job.id
job.clear_id()
clone_match = re.match(r'^(.*)_CLONE_(\d+)$', job.name)
if clone_match is not None:
job.name = "{}_CLONE_{}".format(clone_match.group(1), int(clone_match.group(2))+1)
else:
job.name += "_CLONE_0"
job.timestamps = {"created": time.time()}
job.status = {"name": "run"}
job.errors = []
job.logs = []
job.progress = 0
self._prep_model(job) # make sure our username is tagged in it
if self._go_interactive(parts):
self.do_create("--shell", job=job)
else:
job.save()
self.ok("created job {} ({}) as clone of {}".format(job.name, job.id, old_id))
def do_create(self, args, job=None):
"""Create a new job in Talus
job create TASK_NAME_OR_ID -i IMAGE [-n NAME] [-p PARAMS] [-q QUEUE] [--priority (0-100)] [--network]
-n,--name The name of the job (defaults to name of the task + timestamp)
--priority The priority for the job (0-100, defaults to 50)
--network The network for the image ('all' or 'whitelist'). Whitelist values may
also be a 'whitelist:<domain_or_ip>,<domain_or_ip>' to add domains
to the whitelist. Not specifying additional whitelist hosts results
in a host-only network filter, plus talus-essential hosts.
-q,--queue The queue the job should be inserted into (default: jobs)
-i,--image The image the job should run in (name or id)
-l,--limit The limit for the task. What the limit means is defined by how the tool
reports progress. If the tool does not report progress, then the limit
means the number of total VMs to run.
-t,--tags A comma-separated list of additional tags to add to the job
--vm-max Maximum amount of time a vm should be allowed to run (defaults to 30m)
You may use values such as 30m15s. If no units are used, the value is
assumed to be in seconds.
-p,--params Params for the task (defaults to the default params of the task)
--shell Create the job in an interactive shell (default if already in shell and no args)
--debug All logs are saved to the database (treated as errored, basically)
-f,--params-file The file that contains the params of the job
Examples:
To run the task "CalcFuzzer" while only updating the ``chars`` parameter:
job create "CalcFuzzer" -p '{"chars": "013579+-()/*"}'
"""
args = shlex.split(args)
if self._go_interactive(args):
if job is None:
tasks = list(self._talus_client.task_iter())
fields = []
for x in xrange(len(tasks)):
task = tasks[x]
fields.append([x, task.name, task.id])
headers = ["idx", "name", "task.id"]
idx = utils.idx_prompt(fields, "Which task should the job be based on?", headers=headers)
if idx is None:
return
task = tasks[idx]
job = Job(api_base=self._talus_client._api_base)
job.tags = task.tags
self._prep_model(job)
job.image = task.image
job.task = task.id
job.limit = task.limit
job.name = task.name + " " + str(datetime.datetime.now())
job.params = task.params
job.status = {"name": "run"}
job.vm_max = task.vm_max
job.queue = "jobs"
self.out("basing job on task named {!r} ({})".format(task.name, task.id))
while True:
param_cmd = self._make_model_cmd(job)
cancelled = param_cmd.cmdloop()
if cancelled:
break
error = False
if job.name is None:
self.err("Please set a name for the job")
error = True
if job.image is None:
self.err("You need to set an image, yo")
error = True
if error:
continue
try:
job.timestamps = {"created": time.time()}
job.save()
self.ok("created new job {}".format(job.id))
except errors.TalusApiError as e:
self.err(e.message)
else:
break
return
parser = self._argparser()
parser.add_argument("task_name_or_id")
parser.add_argument("--name", "-n", default=None)
parser.add_argument("--network", default="whitelist")
parser.add_argument("--priority", default=50)
parser.add_argument("--limit", "-l", default=None)
parser.add_argument("--image", "-i", default=None)
parser.add_argument("--queue", "-q", default="jobs")
parser.add_argument("--params", "-p", default=None)
parser.add_argument("--params-file", "-f", default=None)
parser.add_argument("--vm-max", default="30m")
parser.add_argument("--tags", "-t", default="")
parser.add_argument("--debug", default=False, action="store_true")
args = parser.parse_args(args)
params = args.params
if args.params_file is not None:
if not os.path.exists(args.params_file):
raise errors.TalusApiError("params file does not exist: {}".format(args.params_file))
with open(args.params_file, "r") as f:
params = f.read()
if params is not None:
try:
params = json.loads(params)
except Exception as e:
raise errors.TalusApiError("params are not in json format: " + e.message)
tags = args.tags.split(",")
job = self._talus_client.job_create(
task_name_or_id = args.task_name_or_id,
name = args.name,
image = args.image,
params = params,
priority = args.priority,
limit = args.limit,
queue = args.queue,
network = args.network,
vm_max = args.vm_max,
debug = args.debug,
tags = tags
)
self.ok("created job {}".format(job.id))
| {
"content_hash": "61485efddace193060e86adb33169063",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 103,
"avg_line_length": 29.248031496062993,
"alnum_prop": 0.628819491183201,
"repo_name": "optiv-labs/talus_client",
"id": "29b0ecb1148e7ece5a15bad12ae841a68537b7a7",
"size": "14899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talus_client/cmds/jobs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138533"
}
],
"symlink_target": ""
} |
# The format of the tags that delimit email parts in the template content.
TAG_FORMAT = '###{bound}_{block}###'
# The template for tag variables that is used to generate the context
# variables for storing the actual email part tags.
TAG_VAR_FORMAT = 'TAG_{BOUND}_{BLOCK}'
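# Example (illustrative): for bound 'start' and block 'subject', the two
# templates above expand to '###start_subject###' and 'TAG_START_SUBJECT'
# respectively.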
| {
"content_hash": "de38d115a0ba73a4fc6c72325c40319e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 69,
"avg_line_length": 39.8,
"alnum_prop": 0.7085427135678392,
"repo_name": "artemrizhov/django-mail-templated",
"id": "9238962f1e24b2948ca5571bdb1be4700c407fac",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mail_templated/default_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "395"
},
{
"name": "Python",
"bytes": "36040"
},
{
"name": "Shell",
"bytes": "3860"
},
{
"name": "Smarty",
"bytes": "1966"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from keras.datasets import mnist
from hyperemble.neural_net import VanillaNeuralNet
def test_vanilla_neural_net():
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
clf = VanillaNeuralNet(n_layers=2, hidden_dim=200,
keep_prob=0.8, loss_func="auto",
verbose=1, batch_size=128, random_state=1)
clf.fit(X_train, y_train)
res = clf.score(X_test, y_test)
assert res > 0.92
| {
"content_hash": "e5d65be17c8353ba263ea9608e4e6b50",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 69,
"avg_line_length": 33.04347826086956,
"alnum_prop": 0.6026315789473684,
"repo_name": "hduongtrong/hyperemble",
"id": "37d6711658275f1d07977aeff4273891c7394311",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperemble/neural_net/tests/test_neural_net.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "738"
},
{
"name": "Python",
"bytes": "41885"
}
],
"symlink_target": ""
} |
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base
from geoalchemy2 import *
import os, subprocess
import yaml
import initialize_environment
config = yaml.safe_load(open('config.yaml', 'r'))
postgis_extensions_dir = config['database']['postgis_extensions_dir']
database_type = ''
try:
if os.environ['FLASK_ENV'] == 'test':
database_type = 'test'
database_name = config['database']['test']['db']
database_user = config['database']['test']['user']
database_pass = config['database']['test']['pass']
    else:
        database_type = 'development'
        database_name = config['database']['development']['db']
        database_user = config['database']['development']['user']
        database_pass = config['database']['development']['pass']
except KeyError:  # FLASK_ENV is not set
database_type = 'development'
database_name = config['database']['development']['db']
database_user = config['database']['development']['user']
database_pass = config['database']['development']['pass']
engine = create_engine('postgresql://'+database_user+':'+database_pass+'@localhost/' + database_name)
session = scoped_session(sessionmaker(bind=engine))
metadata = MetaData(engine)
Base = declarative_base(metadata=metadata)
Base.query = session.query_property()
def init_db():
import models
metadata.create_all(bind=engine)
session.commit()
def drop_db():
import models
session.close()
metadata.drop_all(bind=engine)
def destroy_db():
postgres_engine = create_engine('postgresql://'+database_user+':'+database_pass+'@localhost/postgres')
conn = postgres_engine.connect()
conn.execute('commit')
conn.execute('drop database ' + database_name)
conn.close()
def create_db():
initialize_environment.main(database_type)
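# Typical lifecycle (an illustrative sketch, assuming config.yaml is in
# place):
#
#   create_db()   # provision the database via initialize_environment
#   init_db()     # create tables for all imported models
#   ...           # use `session` / `Model.query` in application code
#   drop_db()     # drop tables, e.g. between test runs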
| {
"content_hash": "7a04b0596a7d471f63589947a761da09",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 106,
"avg_line_length": 32.56363636363636,
"alnum_prop": 0.68285873813512,
"repo_name": "colorado-code-for-communities/denver_streets",
"id": "3c212b0bb8325f8395c6932824bb933de6b88f42",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "database.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "124"
},
{
"name": "Python",
"bytes": "26570"
}
],
"symlink_target": ""
} |
"""
tray_data provides the data for the tray GUI.
It handles the XML data and provides the neccessary interfaces.
"""
import logging, os
import xml.etree.cElementTree as ET
from util.ordereddict import oDict
from util.trayErrors import PropertyNotFoundError
import tray_item
log = logging.getLogger("xml_tray")
log.setLevel(logging.WARN)
class TrayItem(tray_item.TrayItem):
"""
Parent Class for all items in a tray.
"""
def __init__(self, element, definition=None, parent=None):
tray_item.TrayItem.__init__(self)
self.parent = parent
self.element = element
self.definition = definition
def Delete(self, parent = None):
if parent:
self.parent = TrayItem(parent)
result = self.parent.element.remove(self.element)
self.SetChanged(True)
def ApplyType(self, data, defType):
if defType == "int":
if data == "":
                return None
return int(data)
if defType == "float":
if data == "":
return float(0)
return float(data)
if defType == "str":
return unicode(data)
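    # Examples (illustrative): ApplyType("3.5", "float") -> 3.5,
    # ApplyType("", "int") -> None, ApplyType("", "float") -> 0.0; any
    # unrecognised defType falls through and returns None.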
def GetParent(self):
return self.parent
def GetCopy(self):
return TrayItem(ET.XML(ET.tostring(self.element)), self.definition)
def GetAttribute(self, name):
return self.element.attrib[name]
def GetProperty(self, name):
subelement = self.element.find(name)
if ET.iselement(subelement):
data = subelement.text
elif self.definition and ET.iselement(self.definition.find(name)):
self.SetProperty(name,'')
return ''
else:
error_str = "%s not found in %s." % (name, self.element.tag)
log.debug(error_str)
raise PropertyNotFoundError(error_str)
if data:
data = data.strip()
if ET.iselement(self.definition):
try:
type = self.definition.find(name).text
property = self.ApplyType(data, type)
except ValueError:
property = self.ApplyType("", type)
except AttributeError:
return data
else:
property = data
return property
if ET.iselement(self.definition):
try:
type = self.definition.find(name).text
return self.ApplyType("", type)
except AttributeError:
return ""
else:
return ""
def GetText(self):
return self.element.text
def PrettyPrint(self):
return ET.tostring(self.element)
def ReplaceChild(self, replacement, toBeReplaced):
for i in range(len(self.element)):
if self.element[i] == toBeReplaced.element:
self.element[i] = replacement.element
def RemoveChild(self, child):
if type(child) == type(self):
self.element.remove(child.element)
else:
subelement = self.element.find(child)
self.element.remove(subelement)
def SaveState(self, queue, source=1):
#return
if source == 1:
source = self
if source:
queue.append([self.parent, source, self.GetCopy()])
else:
queue.append([self.parent, None, self.GetCopy()])
def SetAttribute(self, name, value):
try:
oldValue = self.element.attrib[name]
except KeyError:
oldValue = None
        if value != oldValue or oldValue is None:
self.SetChanged(True)
self.element.attrib[name] = value
def SetProperty(self, name, data):
type = "string"
property = self.element.find(name)
if ET.iselement(self.definition) and ET.iselement(property):
try:
type = self.definition.find(name).text
if not self.ApplyType(data, type) == self.GetProperty(name):
self.SetChanged(True)
except ValueError:
log.debug(data + " is not " + type + ", skipped")
return
except AttributeError:
log.debug("Index error. Element does not exist.")
except PropertyNotFoundError:
log.debug("Index error. Element does not exist.")
if ET.iselement(property):
property.text = unicode(data)
else:
newElement = ET.SubElement(self.element,name)
newElement.text = unicode(data)
def SetText(self, text):
self.element.text = text
def AddChild(self, newItem):
self.element.append(newItem.element)
newItem.parent = self
self.SetChanged(True)
def GetElement(self):
return self.element
def GetChildren(self, name):
children = []
kids = self.element.getiterator(name)
for kid in kids:
children.append(TrayItem(kid, None, self))
return children
if __name__ == "__main__":
import controller
logging.basicConfig()
cont = controller.Controller(["U:/Personal/Programming/pyTray/src/","//xtend/biopshare/Thomas/Screens/pyTray_Files/test.exp"])
print ET.tostring(cont.data.screenElement)
    cont.data.Save()
| {
"content_hash": "363253ab3281802697ef6d2ba53a7bde",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 130,
"avg_line_length": 32.02312138728324,
"alnum_prop": 0.5494584837545127,
"repo_name": "tschalch/pyTray",
"id": "39ef770bd007d4f758be310879b2a3d3173cebaa",
"size": "5566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dataStructures/elmtree_tray_item.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1930533"
}
],
"symlink_target": ""
} |
import functools

def lazy(func):
    """Load self.data on first access, then delegate to the wrapped method."""
    @functools.wraps(func)
    def wrapper(self, *args):
        if not self.data:
            self.load()
        return func(self, *args)
    return wrapper
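# Minimal usage sketch (hypothetical class, not part of this module):
#
#   class Store(object):
#       def __init__(self):
#           self.data = None
#       def load(self):
#           self.data = {"answer": 42}
#       @lazy
#       def get(self, key):
#           return self.data[key]
#
# Store().get("answer") triggers load() on first access, then returns 42.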
| {
"content_hash": "db14bb21ef32d21a5332d1b4a1b9662b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 32,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.5472972972972973,
"repo_name": "diegor2/redditbot",
"id": "2e7b38ea92a7ce601c72e70aaf3f798dce6931c8",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/persist/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "Python",
"bytes": "6573"
}
],
"symlink_target": ""
} |
import context
# our own packages
from base import TestSystemCalcBase
# Monkey patching for unit tests
import patches
# tested classes
from delegates import BatteryData
class MockBatteryConfiguration(object):
def __init__(self, service, name, enabled):
self.service = str(service)
self.name = None if name is None else str(name)
self.enabled = bool(enabled)
def mock_load_configured_batteries(instance, configs, *args):
instance.configured_batteries = {y.service: y for y in (MockBatteryConfiguration(x["service"],
x.get("name", None), x.get("enabled", False)) for x in configs)}
instance.confcount = len(configs)
class TestHubSystem(TestSystemCalcBase):
def __init__(self, methodName='runTest'):
TestSystemCalcBase.__init__(self, methodName)
def setUp(self):
TestSystemCalcBase.setUp(self)
self._add_device('com.victronenergy.battery.ttyO1',
product_name='battery',
values={
'/Dc/0/Voltage': 12.15,
'/Dc/0/Current': 5.3,
'/Dc/0/Power': 65,
'/Dc/0/Temperature': 25,
'/Soc': 15.3,
'/DeviceInstance': 0})
self._add_device('com.victronenergy.battery.ttyO2',
product_name='battery',
values={
'/Dc/0/Voltage': 12.15,
'/Dc/0/Current': 5.3,
'/Dc/0/Power': 65,
'/Dc/0/Temperature': 25,
'/Soc': 15.3,
'/DeviceInstance': 1,
'/CustomName': 'Sled battery'})
def test_batteries_path(self):
mock_load_configured_batteries(BatteryData.instance, [
{"name": None, "service": "com.victronenergy.battery/0", "enabled": True},
{"name": None, "service": "com.victronenergy.battery/1", "enabled": True},
])
self._update_values(5000)
data = self._service._dbusobjects['/Batteries']
        self.assertEqual(len(data), 2)
# Check battery service selection
di = {b['instance']: b['active_battery_service'] for b in data}
self.assertEqual(di, {0: True, 1: False})
# Check customname
di = {b['instance']: b['name'] for b in data}
self.assertEqual(di, {0: "battery", 1: "Sled battery"})
for b in data:
for f in ("id", "voltage", "current", "name"):
assert f in b
def test_battery_naming(self):
# Alternate name is used over ProductName|CustomName if available
mock_load_configured_batteries(BatteryData.instance, [
{"name": "Thruster Bank", "service": "com.victronenergy.battery/1", "enabled": True},
])
self._update_values(5000)
data = self._service._dbusobjects['/Batteries']
# Check that name matches config
di = {b['instance']: b['name'] for b in data}
self.assertEqual(di[1], "Thruster Bank")
def test_main_battery_always_listed(self):
# Main battery is always shown, even with no config
mock_load_configured_batteries(BatteryData.instance, [])
self._update_values(5000)
data = self._service._dbusobjects['/Batteries']
        self.assertEqual(len(data), 1)
self.assertEqual(data[0]['name'], "battery")
| {
"content_hash": "cfc90e5ae97c6588fdff865731477e39",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 95,
"avg_line_length": 30.869565217391305,
"alnum_prop": 0.6785211267605634,
"repo_name": "victronenergy/dbus-systemcalc-py",
"id": "5a36024061008120824b081d0f779bc67af864ee",
"size": "2896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/batterydata_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2468"
},
{
"name": "Python",
"bytes": "493722"
}
],
"symlink_target": ""
} |
import os
import sys
import tempfile
path = os.path.abspath(os.path.join(os.path.dirname(__file__)))
sys.path.append(os.path.join(path, '..'))
import carpet
#### Accessory functions ####
# This is an example of a core function, which takes a file and produces
# an output file.
def puts_hello_into_file(file_in, file_out):
with open(file_in) as fi:
content = fi.read()
content += "hello"
with open(file_out, "w") as fo:
fo.write(content)
def create_dummy_file(extension=""):
tmp = tempfile.mktemp() + extension
with open(tmp, "w") as tf:
tf.write("hello!")
return tmp
###############
#### Tests ####
###############
PutsHello = carpet.create_context_class(puts_hello_into_file)
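# Illustrative usage sketch (not part of the original tests; the input file
# name is hypothetical). The generated class wraps the core function in a
# context manager that writes its output to a temp file and cleans up on exit:
#
#     with PutsHello("input.txt") as out_path:
#         print(open(out_path).read())  # contents of input.txt plus "hello"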
### Tests against simple context classes ###
def test_creates_tmp_file():
dummy_file = create_dummy_file()
with PutsHello(dummy_file) as tmp_file:
assert os.path.isfile(tmp_file)
def test_removes_tmp_file():
dummy_file = create_dummy_file()
with PutsHello(dummy_file) as tmp_file:
pass
assert not os.path.exists(tmp_file)
def test_doesnt_remove_if_asked():
dummy_file = create_dummy_file()
with PutsHello(dummy_file, remove_at_exit=False) as tmp_file:
assert os.path.isfile(tmp_file)
assert os.path.isfile(tmp_file)
os.remove(tmp_file)
def test_functionality_is_conserved():
fname_in = create_dummy_file()
fname_out_fn = tempfile.mktemp()
puts_hello_into_file(fname_in, fname_out_fn)
with PutsHello(fname_in) as tmp_file_hello:
with open(fname_out_fn) as fh, open(tmp_file_hello) as ftmp:
assert fh.read() == ftmp.read()
def test_right_extension_string():
PutsHelloExtension = carpet.create_context_class(puts_hello_into_file,
output_extension=".hello")
dummy = create_dummy_file()
with PutsHelloExtension(dummy) as temp_file:
assert temp_file.endswith(".hello")
def test_right_extension_function():
# creates same extension as input file
create_extension = lambda filein: filein.split('.')[-1]
PutsHelloExtension = carpet.create_context_class(puts_hello_into_file,
output_extension=create_extension)
dummy = create_dummy_file(extension=".pepe")
with PutsHelloExtension(dummy) as temp_file:
assert temp_file.endswith(".pepe")
| {
"content_hash": "666c678a5bdee176fcc684e344fbe126",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 74,
"avg_line_length": 29.96153846153846,
"alnum_prop": 0.6598202824133504,
"repo_name": "alvaroabascar/carpet",
"id": "4f992fdeb8711a116509b4c49037d58bce2eed49",
"size": "2338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_1_one_file_in_one_file_out.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7439"
}
],
"symlink_target": ""
} |
"""
Written by Lucas Sinclair.
MIT Licensed.
Contact at www.sinclair.bio
"""
# Built-in modules #
# Internal modules #
# First party modules #
# Third party modules #
from Bio import Entrez
# Constants #
Entrez.email = "I don't know who will be running this script"
###############################################################################
def acc_to_fasta(accessions):
"""
Pass a list of accession IDs as argument; the matching records are
fetched from the "nuccore" database (rettype FASTA, retmode XML),
parsed and returned.
"""
entries = Entrez.efetch(db = "nuccore",
id = accessions,
rettype = "fasta",
retmode = "xml")
records = Entrez.read(entries)
return records
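# Minimal usage sketch (the accession ID is illustrative; a network connection
# and a properly set Entrez.email are assumed):
#
#     records = acc_to_fasta(["NC_000913.3"])
#     print(records[0]["TSeq_sequence"][:60])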
###############################################################################
class NCBIOldStuff(object):
"""
An object that takes care of extracting NCBI information via the eutils
http://www.ncbi.nlm.nih.gov/books/NBK25499/
GI numbers have been deprecated, NCBI suggests to only use the accession
numbers. See:
https://ncbiinsights.ncbi.nlm.nih.gov/2016/07/15/
ncbi-is-phasing-out-sequence-gis-heres-what-you-need-to-know/
"""
#-------------------------------------------------------------------------#
def gi_num_to_tax(self, id_num):
"""
Convert a single GI identification number to taxonomy info.
Can also accept a list of GI numbers in the parameter `id_num`,
in which case the parsed FASTA records are returned instead of
the taxonomy lineage.
"""
if isinstance(id_num, list):
gb_entries = Entrez.efetch(db="nuccore", id=id_num, rettype="fasta",
retmode="xml")
gb_records = Entrez.read(gb_entries)
return gb_records
else:
gb_entry = Entrez.efetch(db="nuccore", id=id_num, rettype="fasta",
retmode="xml")
gb_records = Entrez.read(gb_entry)
tax_num = gb_records[0]['TSeq_taxid']
handle = Entrez.efetch(db="Taxonomy", id=tax_num, retmode="xml")
records = Entrez.read(handle)
return records[0]['Lineage']
#-------------------------------------------------------------------------#
def gis_to_records(self, gis, progress=True):
"""
Download information from NCBI in batch mode.
Return a dictionary with GI numbers as keys and records
as values.
"""
# Should we display progress ? #
from tqdm import tqdm
progress = tqdm if progress else lambda x:x
# Do it by chunks #
gis = list(gis)
at_a_time = 400
result = {}
# Main loop #
for i in progress(range(0, len(gis), at_a_time)):
chunk = gis[i:i+at_a_time]
chunk = map(str, chunk)
records = self.chunk_to_records(chunk)
result.update(dict(zip(chunk, records)))
# Return #
return result
def chunk_to_records(self, chunk, validate=False):
"""
Download from NCBI, retrying until it works or the Python recursion
limit is reached. We don't want to get banned from NCBI, so we pause
briefly at every function call.
"""
from Bio.Entrez.Parser import CorruptedXMLError
from urllib2 import HTTPError
import time
time.sleep(0.5)
try:
response = Entrez.efetch(db="nuccore", id=chunk, retmode="xml")
records = list(Entrez.parse(response, validate=validate))
return records
except (HTTPError, CorruptedXMLError):
print("\nFailed downloading %i records, trying again\n" % len(chunk))
return self.chunk_to_records(chunk, validate=validate)  # retry, keeping the validate flag
#-------------------------------------------------------------------------#
def record_to_taxonomy(self, record): return record['GBSeq_taxonomy']
def record_to_source(self, record):
qualifiers = record['GBSeq_feature-table'][0]['GBFeature_quals']
for qualifier in qualifiers:
if qualifier['GBQualifier_name'] == 'isolation_source':
return qualifier['GBQualifier_value'] | {
"content_hash": "b317726b51c6eaaaa378171af3365708",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 82,
"avg_line_length": 37.401785714285715,
"alnum_prop": 0.5294819766053951,
"repo_name": "xapple/seqsearch",
"id": "ad19449736a1d7a0c37dd6d3542edbe3525921af",
"size": "4237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seqsearch/databases/download.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64077"
}
],
"symlink_target": ""
} |
"""
Instrumentation for Tornado
"""
import elasticapm
from elasticapm.conf import constants
from elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule
from elasticapm.traces import capture_span
from elasticapm.utils.disttracing import TraceParent
class TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):
name = "tornado_request_execute"
creates_transactions = True
instrument_list = [("tornado.web", "RequestHandler._execute")]
async def call(self, module, method, wrapped, instance, args, kwargs):
if not hasattr(instance.application, "elasticapm_client"):
# If tornado was instrumented but not as the main framework
# (i.e. in Flower), we should skip it.
return await wrapped(*args, **kwargs)
# Late import to avoid ImportErrors
from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response
request = instance.request
client = instance.application.elasticapm_client
should_ignore = client.should_ignore_url(request.path)
if not should_ignore:
trace_parent = TraceParent.from_headers(request.headers)
client.begin_transaction("request", trace_parent=trace_parent)
elasticapm.set_context(
lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), "request"
)
# TODO: Can we somehow incorporate the routing rule itself here?
elasticapm.set_transaction_name("{} {}".format(request.method, type(instance).__name__), override=False)
ret = await wrapped(*args, **kwargs)
if not should_ignore:
elasticapm.set_context(
lambda: get_data_from_response(instance, client.config, constants.TRANSACTION), "response"
)
status = instance.get_status()
result = "HTTP {}xx".format(status // 100)
elasticapm.set_transaction_result(result, override=False)
elasticapm.set_transaction_outcome(http_status_code=status)
client.end_transaction()
return ret
class TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):
name = "tornado_handle_request_exception"
instrument_list = [("tornado.web", "RequestHandler._handle_request_exception")]
def call(self, module, method, wrapped, instance, args, kwargs):
if not hasattr(instance.application, "elasticapm_client"):
# If tornado was instrumented but not as the main framework
# (i.e. in Flower), we should skip it.
return wrapped(*args, **kwargs)
# Late import to avoid ImportErrors
from tornado.web import Finish, HTTPError
from elasticapm.contrib.tornado.utils import get_data_from_request
e = args[0]
if isinstance(e, Finish):
# Not an error; Finish is an exception that ends a request without an error response
return wrapped(*args, **kwargs)
client = instance.application.elasticapm_client
request = instance.request
client.capture_exception(
context={"request": get_data_from_request(instance, request, client.config, constants.ERROR)}
)
elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE)
if isinstance(e, HTTPError):
elasticapm.set_transaction_result("HTTP {}xx".format(int(e.status_code / 100)), override=False)
elasticapm.set_context({"status_code": e.status_code}, "response")
else:
elasticapm.set_transaction_result("HTTP 5xx", override=False)
elasticapm.set_context({"status_code": 500}, "response")
return wrapped(*args, **kwargs)
class TornadoRenderInstrumentation(AbstractInstrumentedModule):
name = "tornado_render"
instrument_list = [("tornado.web", "RequestHandler.render")]
def call(self, module, method, wrapped, instance, args, kwargs):
if "template_name" in kwargs:
name = kwargs["template_name"]
else:
name = args[0]
with capture_span(name, span_type="template", span_subtype="tornado", span_action="render"):
return wrapped(*args, **kwargs)
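# Usage sketch (assumed typical wiring, not part of this module): these
# instrumentations only fire once the Tornado application has been wrapped by
# the agent, which attaches the `elasticapm_client` attribute checked above:
#
#     from elasticapm.contrib.tornado import ElasticAPM
#     app = tornado.web.Application(handlers)
#     apm = ElasticAPM(app)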
| {
"content_hash": "7bc15f7e638725263a57eb247e540255",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 120,
"avg_line_length": 42.75247524752475,
"alnum_prop": 0.667901806391848,
"repo_name": "beniwohli/apm-agent-python",
"id": "654deb3828e309f1a2f8459898d39d9442d75878",
"size": "5909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elasticapm/instrumentation/packages/tornado.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1685"
},
{
"name": "C",
"bytes": "81870"
},
{
"name": "Dockerfile",
"bytes": "1730"
},
{
"name": "Gherkin",
"bytes": "10997"
},
{
"name": "Groovy",
"bytes": "5676"
},
{
"name": "HTML",
"bytes": "560"
},
{
"name": "Makefile",
"bytes": "885"
},
{
"name": "Python",
"bytes": "1660078"
},
{
"name": "Shell",
"bytes": "12434"
}
],
"symlink_target": ""
} |
import sys
from oslo.config import cfg
r_opts = [
cfg.StrOpt(
'host',
default='127.0.0.1',
help='RabbitMQ host'),
cfg.StrOpt(
'user',
default='guest',
help='RabbitMQ user'),
cfg.StrOpt(
'passwd',
default='guest',
help='RabbitMQ password'),
cfg.StrOpt(
'vhost',
default='/',
help='RabbitMQ Virtual Host'),
cfg.IntOpt(
'retry',
default=30,
help='RabbitMQ retry interval'),
cfg.StrOpt(
'exchange',
default='JobRunner',
help='RabbitMQ exchange'),
cfg.IntOpt(
'port',
default=5672,
help='RabbitMQ port'),
cfg.DictOpt(
'ssl',
default=None,
help='RabbitMQ SSL options'),
]
CONF = cfg.CONF
CONF.register_opts(r_opts, 'rabbitMQ')
BROKER_HOST = "%s:%s" % (CONF.rabbitMQ.host, CONF.rabbitMQ.port)
BROKER_USER = CONF.rabbitMQ.user
BROKER_PASSWD = CONF.rabbitMQ.passwd
BROKER_VHOST = CONF.rabbitMQ.vhost
BROKER_RETRY_INTERVAL = CONF.rabbitMQ.retry
BROKER_EXCHANGE = CONF.rabbitMQ.exchange
BROKER_SSL_OPTS = CONF.rabbitMQ.ssl
def has_gevent_enabled():
if 'gevent' in sys.modules:
# check if monkey patched
import gevent.socket as gevent_sock
import socket
if gevent_sock.socket is socket.socket:
return True
return False
if has_gevent_enabled():
import gevent.queue as q
Queue = q.Queue
else:
import Queue
Queue = Queue.Queue
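# Usage sketch (config file path is hypothetical): the rabbitMQ options above
# pick up non-default values once the oslo.config files have been parsed,
# which must happen before this module is imported, e.g.:
#
#     from oslo.config import cfg
#     cfg.CONF(['--config-file', '/etc/jrunner/jrunner.conf'])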
| {
"content_hash": "aebf7e37502dada5b28fece028cbec22",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 64,
"avg_line_length": 21.942028985507246,
"alnum_prop": 0.5924702774108322,
"repo_name": "gabriel-samfira/jrunner",
"id": "87a9be43ccede794ff5fa531c0d011a8cb42baff",
"size": "2121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jrunner/common/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "122690"
},
{
"name": "Shell",
"bytes": "2903"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitR06_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HUnitR06_CompleteLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HUnitR06_CompleteLHS, self).__init__(name='HUnitR06_CompleteLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitR06_CompleteLHS')
self["equations"] = []
# Set the node attributes
# match class PhysicalNode(6.0.m.0PhysicalNode) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__PhysicalNode"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.m.0PhysicalNode')
# match class Partition(6.0.m.1Partition) node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """return True"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__Partition"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.m.1Partition')
# apply class SwcToEcuMapping(6.0.a.0SwcToEcuMapping) node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """return True"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__SwcToEcuMapping"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.a.0SwcToEcuMapping')
# apply class EcuInstance(6.0.a.1EcuInstance) node
self.add_node()
self.vs[3]["MT_pre__attr1"] = """return True"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["mm__"] = """MT_pre__EcuInstance"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.a.1EcuInstance')
# match association PhysicalNode--partition-->Partition node
self.add_node()
self.vs[4]["MT_pre__attr1"] = """return attr_value == "partition" """
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["mm__"] = """MT_pre__directLink_S"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.m.0PhysicalNodeassoc46.0.m.1Partition')
# apply association SwcToEcuMapping--ecuInstance-->EcuInstance node
self.add_node()
self.vs[5]["MT_pre__attr1"] = """return attr_value == "ecuInstance" """
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["mm__"] = """MT_pre__directLink_T"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.a.0SwcToEcuMappingassoc56.0.a.1EcuInstance')
# trace association EcuInstance--trace-->PhysicalNode node
self.add_node()
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["mm__"] = """MT_pre__trace_link"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.a.1EcuInstanceassoc66.0.m.0PhysicalNode')
# trace association SwcToEcuMapping--trace-->Partition node
self.add_node()
self.vs[7]["MT_label__"] = """8"""
self.vs[7]["mm__"] = """MT_pre__trace_link"""
self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.a.0SwcToEcuMappingassoc76.0.m.1Partition')
# Add the edges
self.add_edges([
(0,4), # match class PhysicalNode(6.0.m.0PhysicalNode) -> association partition
(4,1), # association Partition -> match class Partition(6.0.m.1Partition)
(2,5), # apply class SwcToEcuMapping(6.0.a.0SwcToEcuMapping) -> association ecuInstance
(5,3), # association EcuInstance -> apply class EcuInstance(6.0.a.1EcuInstance)
(3,6), # apply class EcuInstance(6.0.m.0PhysicalNode) -> backward_association
(6,0), # backward_associationPhysicalNode -> match_class PhysicalNode(6.0.m.0PhysicalNode)
(2,7), # apply class SwcToEcuMapping(6.0.m.1Partition) -> backward_association
(7,1), # backward_associationPartition -> match_class Partition(6.0.m.1Partition)
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
def eval_attr12(self, attr_value, this):
return True
# define evaluation methods for each apply class.
def eval_attr13(self, attr_value, this):
return True
def eval_attr14(self, attr_value, this):
return True
# define evaluation methods for each match association.
def eval_attr15(self, attr_value, this):
return attr_value == "partition"
# define evaluation methods for each apply association.
def eval_attr16(self, attr_value, this):
return attr_value == "ecuInstance"
def constraint(self, PreNode, graph):
return True
| {
"content_hash": "987d3616caf8841e41bba5b028ce802d",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 104,
"avg_line_length": 37.525,
"alnum_prop": 0.6620031090384189,
"repo_name": "levilucio/SyVOLT",
"id": "8e7d39cfee73508db419cd13deb5d4f0794adae4",
"size": "4503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GM2AUTOSAR_MM/Properties/unit_contracts/HUnitR06_CompleteLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import xml.etree.cElementTree as ET
import Pyrex.Plex
import StringIO
import re
import copy
class QuillManual(object):
def __init__(self, langDefFile):
self.codeChars = 'abcdefghijklmnopqrstuvwxyz'
if langDefFile != None:
self.loadPrimaryDef(langDefFile)
def codeGen(self):
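# Yield two-character tokens 'aA', 'aB', ..., 'zZ' (26*26 internal codes).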
firstIndex = 0
secondIndex = 0
while firstIndex < len(self.codeChars) and secondIndex < len(self.codeChars):
code = self.codeChars[firstIndex]+ self.codeChars[secondIndex].upper()
secondIndex += 1
if secondIndex == len(self.codeChars):
secondIndex = 0
firstIndex += 1
yield code
def loadPrimaryDef(self, langDefFile):
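# Parse the XML language definition: literal-to-code mappings, render rules,
# unicode-to-literal rules, the akshara (syllable) regex, helper groups and
# preprocessor substitutions.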
try:
f = open(langDefFile, 'r')
except IOError:
print "Can't load the primary definition file"
return False
print "Loading .. " + langDefFile
lang = ET.parse(f)
primdef = lang.getroot()
cGen = self.codeGen()
self.preProcs = []
self.propsMap = {}
self.lit2token = {}
self.token2code = {}
self.code2token = {}
self.token2lit = {}
self.lexicon = None
self.helperLexicon = None
for tree in primdef.getchildren():
if tree.tag == 'codemap':
lexiconParams = []
for mapping in tree.getchildren():
lit = mapping.attrib['name']
code = eval(mapping.attrib['code'])
prop = mapping.attrib['prop']
litToken = cGen.next()
if self.lit2token.has_key(lit):
print "Duplicate lit name"
assert(False)
else:
self.lit2token[lit] = litToken
self.token2lit[litToken] = lit
lexiconParams.append((Pyrex.Plex.Str(lit), litToken))
self.propsMap[lit] = [litToken]
self.token2code[litToken] = code
self.code2token[code] = litToken
if self.propsMap.has_key(prop) == True:
self.propsMap[prop].append(litToken)
else:
self.propsMap[prop] = [litToken]
lexiconParams.append((Pyrex.Plex.AnyChar, '#'))
self.lexicon = Pyrex.Plex.Lexicon(lexiconParams)
elif tree.tag == 'render-rules':
self.renderRules = {}
for rule in tree.getchildren():
lit = rule.attrib['lit']
token = self.compileStr(lit)
self.renderRules[token] = []
for producer in rule.getchildren():
reStr = producer.attrib['regex']
replaceStr = producer.attrib['replace']
reStrC = self.compileRe(reStr)
replaceC = self.compileStr(replaceStr)
self.renderRules[token].append((reStrC, replaceC))
elif tree.tag == 'utolit-rules':
self.utolitRules = {}
for rule in tree.getchildren():
lit = rule.attrib['unicode']
token = self.compileStr(lit)
self.utolitRules[token] = []
for producer in rule.getchildren():
reStr = producer.attrib['regex']
replaceTuple = eval(producer.attrib['replace'])
reStrC = self.compileRe(reStr)
replaceC = ''
for t in replaceTuple:
replaceC += self.lit2token[t]
self.utolitRules[token].append((reStrC, replaceC))
elif tree.tag == 'akshara':
aksharaPatternStr = tree.attrib['regex']
self.aksharaPattern = self.compileRe(aksharaPatternStr)
self.aksharaPattern += '|(#.)'  # also match passthrough tokens ('#' + literal character)
elif tree.tag == 'helper-groups':
self.helperRules = {}
self.primary2helper = {}
lexiconParams = []
for helper in tree.getchildren():
key = helper.attrib['key']
regex = helper.attrib['regex']
options = eval(helper.attrib['options'])
self.helperRules[key] = (regex, options)
lexiconParams.append((Pyrex.Plex.Str(key), Pyrex.Plex.TEXT))
for opt in options:
if self.primary2helper.has_key(opt) == False:
self.primary2helper[opt] = key
lexiconParams.append((Pyrex.Plex.AnyChar, Pyrex.Plex.TEXT))
self.helperLexicon = Pyrex.Plex.Lexicon(lexiconParams)
elif tree.tag == 'preprocessor':
regex = eval(tree.attrib['regex'])
value = eval(tree.attrib['value'])
self.preProcs.append((regex, value))
f.close()
def compileStr(self, litstr):
m=re.compile(r'_([^_]+)_')
result = m.search(litstr)
while result:
lit = result.group(1)
token = self.lit2token[lit]
toReplace = litstr[result.start():result.end()]
litstr = litstr.replace(toReplace, token)
result = m.search(litstr)
finalStr = litstr
return finalStr
def compileRe(self, reStr):
m=re.compile(r'_([^_]+)_')
result = m.search(reStr)
while result:
prop = result.group(1)
orList = self.propsMap[prop]
orRegex = '(?:'+'|'.join(orList)+')'
toReplace = reStr[result.start():result.end()]
reStr = reStr.replace(toReplace, orRegex)
result = m.search(reStr)
finalRegex = reStr
return finalRegex
def toPrimaryTokens(self, literal):
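# Scan a literal string into two-character primary tokens; characters with
# no mapping pass through prefixed with '#'.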
strIO = StringIO.StringIO(literal)
scanner = Pyrex.Plex.Scanner(self.lexicon, strIO, "LitScanner")
tokenList = []
while True:
token = scanner.read()
if token[0] == None:
break
elif token[0] == '#':
tokenList.append('#'+token[1])
else:
tokenList.append(token[0])
return tokenList
def tokensToCodes(self, tokens, markerRange=None):
codeStr = u''
counter = 0
newStart = 0
newStop = 0
tokensStr = ''.join(tokens)
aksharaIter = re.finditer(self.aksharaPattern, tokensStr)
aksharaList = [akshara.group() for akshara in aksharaIter if len(akshara.group())>0]
counter = 0
startMarked = False
stopMarked = False
tokenCounter = 0
for akshara in aksharaList:
tokens = [akshara[i:i+2] for i in range(0, len(akshara), 2)]
tempStart = counter
for t in tokens:
if t[0] != '#':
codeStr += self.token2code[t]
counter += len(self.token2code[t])
else:
codeStr += t[1]
counter += len(t[1])
tokenCounter +=1
if markerRange != None and startMarked == False:
if tokenCounter >= (markerRange[0]+1):
newStart = tempStart
startMarked = True
if markerRange != None and stopMarked == False:
if tokenCounter >= markerRange[1]:
newStop = counter
stopMarked = True
mRange = None
if newStop > newStart:
mRange = (newStart, newStop)
return (codeStr, mRange)
def literalPreProc(self, literal):
procs = self.preProcs
for (rule, subst) in procs:
literal = re.sub(rule, subst, literal)
return literal
def primaryToUnicode(self, literal, markerRange = None):
tokenList = self.toPrimaryTokens(literal)
tokenListFinal = []
tokenStrFinal = ''
tokenStr = "".join(tokenList)
newMarkerRangeBegin = -1
newMarkerRangeEnd = -1
newMarkerRange = None
for (index, token) in enumerate(tokenList):
if token[0] != '#':
producers = self.renderRules[token]
bestMatchLen=0
currRepl = ''
for (regStr, repl) in producers:
iter = re.finditer(regStr, tokenStr)
for match in iter:
matchLen = len(match.group())
matchIndex = match.start(1)
if matchLen > bestMatchLen and matchIndex == 2*index:  # each token is two characters, so index in the string is 2*index in the list
currRepl = repl
bestMatchLen = matchLen
else:
currRepl = token
if markerRange != None:
if index == markerRange[0]:
newMarkerRangeBegin = len(tokenStrFinal)/2
newMarkerRangeEnd = newMarkerRangeBegin + len(currRepl)/2
elif index > markerRange[0] and index < markerRange[1]:
newMarkerRangeEnd += len(currRepl)/2
tokenStrFinal += currRepl
tokenListFinal = [tokenStrFinal[i:i+2] for i in range(0, len(tokenStrFinal), 2)]
if newMarkerRangeBegin >= 0 and newMarkerRangeEnd <= len(tokenListFinal):
newMarkerRange = (newMarkerRangeBegin, newMarkerRangeEnd)
(codeStr, retMarkerRange) = self.tokensToCodes(tokenListFinal, newMarkerRange)
return (codeStr, retMarkerRange)
def checkProp(self, token, prop):
if token in self.propsMap[prop]:
return True
return False
def unicodeToPrimaryOld(self, uStr):
tokenList = []
for uChar in uStr:
try:
tokenList.append(self.code2token[uChar])
except KeyError:
tokenList.append('#'+uChar)
prevIsCons = False
tokenListInternal = []
for (i, tk) in enumerate(tokenList):
if prevIsCons == True and self.checkProp(tk, 'nukta'):
tokenListInternal.append(tk)
continue
if prevIsCons == True:
if self.checkProp(tk, 'cons'):
tokenListInternal.append(self.lit2token['a0'])
tokenListInternal.append(tk)
elif self.checkProp(tk, 'vowel'):
tokenListInternal.append(self.lit2token['a0'])
tokenListInternal.append(tk)
prevIsCons = False
elif self.checkProp(tk, 'dot'):
if i == len(tokenList)-1 or self.checkProp(tokenList[i+1], 'cons') == True:
tokenListInternal.append(self.lit2token['a0'])
prevIsCons = False
tokenListInternal.append(tk)
elif self.checkProp(tk, 'halanth') and i<(len(tokenList)-1):
if self.checkProp(tokenList[i+1], 'cons') == False:
tokenListInternal.append(tk)
prevIsCons = False
else:
tokenListInternal.append(tk)
prevIsCons = False
else:
if self.checkProp(tk, 'cons'):
tokenListInternal.append(tk)
prevIsCons = True
else:
tokenListInternal.append(tk)
if prevIsCons == True and len(uStr)==1:
tokenListInternal.append(self.lit2token['a0'])
literal = ''
for tk in tokenListInternal:
if tk[0] != '#':
literal += self.token2lit[tk]
else:
literal += tk[1]
literal = ''.join([x for x in literal if x != '0'])
return literal
def unicodeToPrimary(self, uStr):
tokenList = []
for uChar in uStr:
try:
tokenList.append(self.code2token[uChar])
except KeyError:
tokenList.append('#'+uChar)
tokenStr = ''.join(tokenList)
tokenStrInternal = ''
for (index, token) in enumerate(tokenList):
if token[0] != '#':
producers = self.utolitRules[token]
bestMatchLen=0
currRepl = ''
for (regStr, repl) in producers:
iter = re.finditer(regStr, tokenStr)
for match in iter:
matchLen = len(match.group())  # group() with no arguments returns the entire matched string
matchIndex = match.start(1)
#if matchLen >= bestMatchLen and matchIndex == index:
if matchLen > bestMatchLen and matchIndex == 2*index:  # each token is two characters, so index in the string is 2*index in the list
currRepl = repl
bestMatchLen = matchLen
else:
currRepl = token
tokenStrInternal += currRepl
epsilon = self.lit2token['EPS']
tokenListInternal = [tokenStrInternal[i:i+2] for i in range(0, len(tokenStrInternal), 2) if tokenStrInternal[i:i+2] != epsilon]
literal = ''
for tk in tokenListInternal:
if tk[0] != '#':
literal += self.token2lit[tk]
else:
literal += tk[1]
literal = ''.join([x for x in literal if x != '0'])
return literal
def unicodeToHelperPair(self, uStr):
primary = self.unicodeToPrimary(uStr)
uStrNew = self.primaryToUnicode(primary, None)[0]
helper = self.unicodeToHelperStr(uStrNew)
return (helper, uStrNew)
def unicodeToHelperStr(self, uStr):
primaryStr = self.unicodeToPrimary(uStr)
strIO = StringIO.StringIO(primaryStr)
scanner = Pyrex.Plex.Scanner(self.lexicon, strIO, "LitScanner")
tokenList = []
while True:
token = scanner.read()
if token[0] == None:
break
else:
tokenList.append(token[1])
helperStr = ''
for token in tokenList:
try:
helperStr += self.primary2helper[token]
except KeyError:
helperStr += token
return helperStr
def getInsertCorrections(self, currHelper, currUstr, pos, delta):
(helperTokens, primaryTokens) = self.getTokenListPair(currHelper, currUstr)
if (len(helperTokens) != len(primaryTokens)) or (pos < 0 or pos > len(currHelper)):
return ((currHelper, None), [(currUstr, None)])
leftSlice = currHelper[0:pos]
midSlice = delta
rightSlice = currHelper[pos:]
newHelper = leftSlice+midSlice+rightSlice
newHelperTokens = self.toHelperTokens(newHelper)
outListLeft = []
newStart = 0
oldTokensRetained = 0
parseLen = 0
if pos > 0 and oldTokensRetained < len(primaryTokens):
index = 0
while parseLen < pos and (helperTokens[index][0]==newHelperTokens[index][0]):
outListLeft.append([primaryTokens[index][0]])
oldTokensRetained += 1
parseLen += helperTokens[index][1]
index += 1
newStart = index
litStart = parseLen
outListRight = []
newStop = len(newHelperTokens)
parseLen = 0
if pos < len(currHelper) and oldTokensRetained < len(primaryTokens):
oldIndex = len(primaryTokens) - 1
newIndex = len(newHelperTokens) - 1
while parseLen < (len(currHelper)-pos) and (helperTokens[oldIndex][0]==newHelperTokens[newIndex][0]):
outListRight.append([primaryTokens[oldIndex][0]])
oldTokensRetained += 1
parseLen += helperTokens[oldIndex][1]
oldIndex -= 1
newIndex -= 1
newStop = newIndex + 1
outListRight.reverse()
litStop = len(newHelper)-parseLen
outListMiddle = []
for i in range(newStart, newStop):
try:
options = self.helperRules[newHelperTokens[i][0]][1]
except KeyError:
options = [newHelperTokens[i][0]]
outListMiddle.append(options)
outList = []
outList.extend(outListLeft)
outList.extend(outListMiddle)
outList.extend(outListRight)
tokenStart = newStart
tokenStop = newStop
uLitList = [([], (tokenStart, tokenStop))]
count = 1
for (i, options) in enumerate(outList):
count = count*len(options)
newList=[]
for eachOption in options:
temp = copy.deepcopy(uLitList)
for x in temp:
x[0].append(eachOption)
newList.extend(temp)
uLitList = newList
uStrList = []
for litList in uLitList:
iLit = ''.join(litList[0])
uStrList.append(self.primaryToUnicode(iLit, litList[1]))
markerRange = None
if litStop > litStart:
markerRange = (litStart, litStop)
return ((newHelper, markerRange), uStrList)
def getDeleteCorrections(self, currHelper, currUstr, pos, delLen):
(helperTokens, primaryTokens) = self.getTokenListPair(currHelper, currUstr)
if (len(helperTokens) != len(primaryTokens)) or (pos < 0 or pos >= len(currHelper)):
return ((currHelper, None), [(currUstr, None)])
newHelper = list(currHelper)
del newHelper[pos:pos+delLen]
newHelper = ''.join(newHelper)
newHelperTokens = self.toHelperTokens(newHelper)
outListLeft = []
newStart = 0
oldTokensRetained = 0
parseLen = 0
if pos > 0 and oldTokensRetained < len(primaryTokens):
index = 0
while (parseLen < pos) and (helperTokens[index][0]==newHelperTokens[index][0]):
outListLeft.append([primaryTokens[index][0]])
oldTokensRetained += 1
parseLen += helperTokens[index][1]
index += 1
newStart = index
litStart = parseLen
outListRight = []
parseLen = 0
newStop = len(newHelperTokens)
if (pos+delLen) < len(currHelper) and oldTokensRetained < len(primaryTokens):
oldIndex = len(primaryTokens) - 1
newIndex = len(newHelperTokens) - 1
while parseLen < (len(currHelper)-pos-delLen) and (helperTokens[oldIndex][0]==newHelperTokens[newIndex][0]):
outListRight.append([primaryTokens[oldIndex][0]])
oldTokensRetained += 1
parseLen += helperTokens[oldIndex][1]
oldIndex -= 1
newIndex -= 1
newStop = newIndex + 1
outListRight.reverse()
litStop = len(newHelper)-parseLen
outListMiddle = []
for i in range(newStart, newStop):
try:
options = self.helperRules[newHelperTokens[i][0]][1]
except KeyError:
options = [newHelperTokens[i][0]]
outListMiddle.append(options)
outList = []
outList.extend(outListLeft)
outList.extend(outListMiddle)
outList.extend(outListRight)
tokenStart = newStart
tokenStop = newStop
uLitList = [([], (tokenStart, tokenStop))]
count = 1
for (i, options) in enumerate(outList):
count = count*len(options)
newList=[]
for eachOption in options:
temp = copy.deepcopy(uLitList)
for x in temp:
x[0].append(eachOption)
newList.extend(temp)
uLitList = newList
uStrList = []
for litList in uLitList:
iLit = ''.join(litList[0])
uStrList.append(self.primaryToUnicode(iLit, litList[1]))
markerRange = None
if litStop > litStart:
markerRange = (litStart, litStop)
return ((newHelper, markerRange), uStrList)
def getOptionsAt(self, currHelper, currUstr, pos):
(tokenList, currList) = self.getTokenListPair(currHelper, currUstr)
if len(currList) != len(tokenList):
return ((currHelper, None), [(currUstr, None)])
pos = max(1, pos)
pos = min(len(currHelper), pos)
posIndex = self.getTokenListPos(tokenList, pos)
outList = []
for (i, tu) in enumerate(currList):
if i == posIndex:
try:
options = self.helperRules[tokenList[i][0]][1]
except KeyError:
options = [tokenList[i][0]]
outList.append(options)
else:
outList.append([currList[i][0]])
litStart = 0
for i in range(posIndex):
litStart += len(tokenList[i][0])
litStop = litStart + len(tokenList[posIndex][0])
tokenStart = posIndex
tokenStop = posIndex + 1
uLitList = [([], (tokenStart, tokenStop))]
count = 1
for (i, options) in enumerate(outList):
count = count*len(options)
newList=[]
for eachOption in options:
temp = copy.deepcopy(uLitList)
for x in temp:
x[0].append(eachOption)
newList.extend(temp)
uLitList = newList
uStrList = []
for litList in uLitList:
iLit = ''.join(litList[0])
uStrList.append(self.primaryToUnicode(iLit, litList[1]))
markerRange = None
if litStop > litStart:
markerRange = (litStart, litStop)
return ((currHelper, markerRange), uStrList)
def toHelperTokens(self, currHelper):
strIO = StringIO.StringIO(currHelper)
scanner = Pyrex.Plex.Scanner(self.helperLexicon, strIO, "HelperLitScanner")
tokenList = []
while True:
token = scanner.read()
if token[0] == None:
break
else:
tokenList.append((token[0], len(token[0])))
return tokenList
def getTokenListPair(self, currHelper, currUstr):
currPrimary = self.unicodeToPrimary(currUstr)
tokenList = self.toHelperTokens(currHelper)
currList = []
unparsed = currPrimary
for (i, token) in enumerate(tokenList):
try:
options = self.helperRules[token[0]][1]
except KeyError:
options = [token[0]]
lexiconParams = []
for op in options:
lexiconParams.append((Pyrex.Plex.Str(op), Pyrex.Plex.TEXT ))
tmpLexicon = Pyrex.Plex.Lexicon(lexiconParams)
tempIO = StringIO.StringIO(unparsed)
tmpScanner = Pyrex.Plex.Scanner(tmpLexicon, tempIO, "TmpScanner")
tkn = tmpScanner.read()
if tkn == None:
assert(False)
charPos = tmpScanner.position()[2]
if charPos != 0:
assert(False)
currList.append((tkn[0], len(tkn[0])))
unparsed = unparsed[len(tkn[0]):]
if len(unparsed) == 0 and (i+1) < len(tokenList):
extn = [(self.helperRules[x][1][0], len(self.helperRules[x][1][0])) for (x, y) in tokenList[i+1:] ]
currList.extend(extn)
break
return (tokenList, currList)
def getTokenListPos(self, tokenList, litPos):
index = 0
currPos = 0
posIndex = -1
for (key, length) in tokenList:
currPos += length
if currPos >= litPos:
posIndex = index
break
else:
index += 1
return posIndex
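# Usage sketch (the definition file name is hypothetical; any XML with the
# structure parsed by loadPrimaryDef will do):
#
#     qm = QuillManual('hindi.xml')
#     ucode, _ = qm.primaryToUnicode('namaste')
#     roundtrip = qm.unicodeToPrimary(ucode)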
| {
"content_hash": "b47151dec462a1bf8b9d975eb5e6ac7d",
"timestamp": "",
"source": "github",
"line_count": 694,
"max_line_length": 145,
"avg_line_length": 28.62680115273775,
"alnum_prop": 0.6386470025670711,
"repo_name": "teamtachyon/Quillpad-Server",
"id": "66261a9a671e2232aacdb7a1c0b0ab637be90d69",
"size": "19978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QuillManual.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "31654"
},
{
"name": "Python",
"bytes": "262170"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('share', '0012_auto_20161212_1555'),
]
operations = [
migrations.CreateModel(
name='Department',
fields=[
],
options={
'proxy': True,
},
bases=('share.organization',),
),
migrations.CreateModel(
name='DepartmentVersion',
fields=[
],
options={
'proxy': True,
},
bases=('share.organizationversion',),
),
migrations.CreateModel(
name='PrincipalInvestigatorContact',
fields=[
],
options={
'proxy': True,
},
bases=('share.principalinvestigator',),
),
migrations.CreateModel(
name='PrincipalInvestigatorContactVersion',
fields=[
],
options={
'proxy': True,
},
bases=('share.principalinvestigatorversion',),
),
migrations.AddField(
model_name='award',
name='award_amount',
field=models.PositiveIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='award',
name='date',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='awardversion',
name='award_amount',
field=models.PositiveIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='awardversion',
name='date',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='abstractagent',
name='type',
field=models.CharField(choices=[('share.agent', 'agent'), ('share.organization', 'organization'), ('share.consortium', 'consortium'), ('share.department', 'department'), ('share.institution', 'institution'), ('share.person', 'person')], db_index=True, max_length=255),
),
migrations.AlterField(
model_name='abstractagentversion',
name='type',
field=models.CharField(choices=[('share.agentversion', 'agent version'), ('share.organizationversion', 'organization version'), ('share.consortiumversion', 'consortium version'), ('share.departmentversion', 'department version'), ('share.institutionversion', 'institution version'), ('share.personversion', 'person version')], db_index=True, max_length=255),
),
migrations.AlterField(
model_name='abstractagentworkrelation',
name='type',
field=models.CharField(choices=[('share.agentworkrelation', 'agent work relation'), ('share.contributor', 'contributor'), ('share.creator', 'creator'), ('share.principalinvestigator', 'principal investigator'), ('share.principalinvestigatorcontact', 'principal investigator contact'), ('share.funder', 'funder'), ('share.host', 'host'), ('share.publisher', 'publisher')], db_index=True, max_length=255),
),
migrations.AlterField(
model_name='abstractagentworkrelationversion',
name='type',
field=models.CharField(choices=[('share.agentworkrelationversion', 'agent work relation version'), ('share.contributorversion', 'contributor version'), ('share.creatorversion', 'creator version'), ('share.principalinvestigatorversion', 'principal investigator version'), ('share.principalinvestigatorcontactversion', 'principal investigator contact version'), ('share.funderversion', 'funder version'), ('share.hostversion', 'host version'), ('share.publisherversion', 'publisher version')], db_index=True, max_length=255),
),
]
| {
"content_hash": "b614962352d8697e18fc2766af02d7d4",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 535,
"avg_line_length": 44.30337078651685,
"alnum_prop": 0.5861019528277961,
"repo_name": "laurenbarker/SHARE",
"id": "94d36e49798c485fa79677211acf09208fe9ef9e",
"size": "4015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/migrations/0013_auto_20161214_1921.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3786"
},
{
"name": "Gherkin",
"bytes": "1773"
},
{
"name": "HTML",
"bytes": "4849"
},
{
"name": "Python",
"bytes": "1431647"
},
{
"name": "Shell",
"bytes": "830"
}
],
"symlink_target": ""
} |
"""
======================================
Searching and downloading from the VSO
======================================
How to download data from the VSO with Fido.
"""
import astropy.units as u
from sunpy.net import Fido, attrs as a
###############################################################################
# `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>` is the primary
# interface to search for and download data and
# will search the VSO when appropriate. The following example searches for all
# SOHO/EIT images between the times defined below by defining
# a timerange (`~sunpy.net.attrs.Time`) and
# the instrument (`~sunpy.net.attrs.Instrument`).
attrs_time = a.Time('2005/01/01 00:10', '2005/01/01 00:15')
result = Fido.search(attrs_time, a.Instrument('eit'))
###############################################################################
# Let's inspect the results.
print(result)
###############################################################################
# The following shows how to download the results. If we
# don't provide a path it will download the file into the sunpy data directory.
# The output provides the path of the downloaded files.
downloaded_files = Fido.fetch(result)
print(downloaded_files)
###############################################################################
# More complicated queries can be constructed by using relational operators.
# For example, it is possible to query two wavelengths at the same time with
# the OR operator (|).
result = Fido.search(a.Time('2012/03/04 00:00', '2012/03/04 00:02'),
a.Instrument('aia'),
a.Wavelength(171*u.angstrom) | a.Wavelength(94*u.angstrom))
print(result)
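###############################################################################
# As before, the combined result can be handed straight to ``Fido.fetch``;
# this illustrative extra step downloads the files for both wavelengths.

downloaded_files = Fido.fetch(result)
print(downloaded_files)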
| {
"content_hash": "19084f201205a9a086eff4f005e16097",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 43.8974358974359,
"alnum_prop": 0.5589953271028038,
"repo_name": "dpshelio/sunpy",
"id": "b596a5d6dad25fbe4306fbdc32414fab0676adea",
"size": "1736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/acquiring_data/searching_vso.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "73732"
},
{
"name": "IDL",
"bytes": "5746"
},
{
"name": "Python",
"bytes": "1922243"
},
{
"name": "Shell",
"bytes": "235"
}
],
"symlink_target": ""
} |
from text import nltkmgr
from nltk.corpus import state_union
from nltk.tokenize import PunktSentenceTokenizer
r = nltkmgr.synset("ready")
print(r)
| {
"content_hash": "bb68f79365e8787c3b5d04c63f64210b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 48,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.8108108108108109,
"repo_name": "deepakkumar1984/sia-cog",
"id": "447befabad0cc7305f30e62b5fc3d25b166e7cce",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/layers_understand.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10425"
},
{
"name": "C++",
"bytes": "146"
},
{
"name": "Cuda",
"bytes": "5064"
},
{
"name": "Makefile",
"bytes": "264"
},
{
"name": "Python",
"bytes": "394909"
},
{
"name": "Shell",
"bytes": "1732"
}
],
"symlink_target": ""
} |
"""
Definitions of tree structures.
"""
from typing import Dict, List, Tuple, Set, Mapping, Any, Iterator, Optional
from penman.types import (Variable, Branch, Node)
_Step = Tuple[Tuple[int, ...], Branch] # see Tree.walk()
class Tree:
"""
A tree structure.
A tree is essentially a node that contains other nodes, but this
Tree class is useful to contain any metadata and to provide
tree-based methods.
"""
__slots__ = 'node', 'metadata'
def __init__(self,
node: Node,
metadata: Optional[Mapping[str, str]] = None):
self.node = node
self.metadata = metadata or {}
def __eq__(self, other) -> bool:
if isinstance(other, Tree):
other = other.node
return self.node == other
def __repr__(self) -> str:
return f'Tree({self.node!r})'
def __str__(self) -> str:
s = _format(self.node, 2)
return f'Tree(\n {s})'
def nodes(self) -> List[Node]:
"""
Return the nodes in the tree as a flat list.
"""
return _nodes(self.node)
def walk(self) -> Iterator[_Step]:
"""
Iterate over branches in the tree.
This function yields pairs of (*path*, *branch*) where each
*path* is a tuple of 0-based indices of branches to get to
*branch*. For example, the path (2, 0) is the concept branch
`('/', 'bark-01')` in the tree for the following PENMAN
string, traversing first to the third (index 2) branch of the
top node, then to the first (index 0) branch of that node::
(t / try-01
:ARG0 (d / dog)
:ARG1 (b / bark-01
:ARG0 d))
The (*path*, *branch*) pairs are yielded in depth-first order
of the tree traversal.
"""
yield from _walk(self.node, ())
def reset_variables(self, fmt='{prefix}{j}') -> None:
"""
Recreate node variables formatted using *fmt*.
The *fmt* string can be formatted with the following values:
- ``prefix``: first alphabetic character in the node's concept
- ``i``: 0-based index of the current occurrence of the prefix
- ``j``: 1-based index starting from the second occurrence
"""
varmap: Dict[Variable, Variable] = {}
used: Set[Variable] = set()
for var, branches in self.nodes():
if var not in varmap:
concept = next((tgt for role, tgt in branches if role == '/'),
None)
pre = _default_variable_prefix(concept)
i = 0
newvar = None
while newvar is None or newvar in used:
newvar = fmt.format(
prefix=pre,
i=i,
j='' if i == 0 else i + 1)
i += 1
used.add(newvar)
varmap[var] = newvar
self.node = _map_vars(self.node, varmap)
def _format(node: Node, level: int) -> str:
var, branches = node
next_level = level + 2
indent = '\n' + ' ' * next_level
branch_strings = [_format_branch(branch, next_level)
for branch in branches]
return '({!r}, [{}{}])'.format(
var, indent, (',' + indent).join(branch_strings))
def _format_branch(branch: Branch, level: int) -> str:
role, target = branch
if is_atomic(target):
target = repr(target)
else:
target = _format(target, level)
return f'({role!r}, {target})'
def _nodes(node: Node) -> List[Node]:
var, branches = node
ns = [] if var is None else [node]
for _, target in branches:
# if target is not atomic, assume it's a valid tree node
if not is_atomic(target):
ns.extend(_nodes(target))
return ns
def _walk(node: Node, path: Tuple[int, ...]) -> Iterator[_Step]:
var, branches = node
for i, branch in enumerate(branches):
curpath = path + (i,)
yield (curpath, branch)
_, target = branch
if not is_atomic(target):
yield from _walk(target, curpath)
def _default_variable_prefix(concept: Any) -> Variable:
"""
Return the variable prefix for *concept*.
If *concept* is a non-empty string, the prefix is the first
alphabetic character in the string, if there are any, downcased.
Otherwise the prefix is ``'_'``.
Examples:
>>> _default_variable_prefix('Alphabet')
'a'
>>> _default_variable_prefix('chase-01')
'c'
>>> _default_variable_prefix('"string"')
's'
>>> _default_variable_prefix('_predicate_n_1"')
'p'
>>> _default_variable_prefix(1)
'_'
>>> _default_variable_prefix(None)
'_'
>>> _default_variable_prefix('')
'_'
"""
prefix = '_'
if concept and isinstance(concept, str):
for c in concept:
if c.isalpha():
prefix = c.lower()
break
return prefix
def _map_vars(node, varmap):
var, branches = node
newbranches: List[Branch] = []
for role, tgt in branches:
if not is_atomic(tgt):
tgt = _map_vars(tgt, varmap)
elif role != '/' and tgt in varmap:
tgt = varmap[tgt]
newbranches.append((role, tgt))
return (varmap[var], newbranches)
def is_atomic(x) -> bool:
"""
Return ``True`` if *x* is a valid atomic value.
Examples:
>>> from penman.tree import is_atomic
>>> is_atomic('a')
True
>>> is_atomic(None)
True
>>> is_atomic(3.14)
True
>>> is_atomic(('a', [('/', 'alpha')]))
False
"""
return x is None or isinstance(x, (str, int, float))
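# Minimal demonstration (not part of the original module); the tree literal
# mirrors the PENMAN example in Tree.walk's docstring:
#
#     t = Tree(('t', [('/', 'try-01'),
#                     (':ARG0', ('d', [('/', 'dog')])),
#                     (':ARG1', ('b', [('/', 'bark-01'), (':ARG0', 'd')]))]))
#     for path, branch in t.walk():
#         print(path, branch)  # (0,) ('/', 'try-01') ... (2, 0) ('/', 'bark-01')
#     t.reset_variables()      # prefixes t, d, b are already unique, so names are kept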
| {
"content_hash": "904099574535d64c69ff21831a70ed59",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 78,
"avg_line_length": 28.98507462686567,
"alnum_prop": 0.5298661174047374,
"repo_name": "goodmami/penman",
"id": "561650ec280c05a86ff87e82b82019ce3447cbc9",
"size": "5827",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "penman/tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186533"
}
],
"symlink_target": ""
} |
import os
import platform
import textwrap
import unittest
import pytest
from parameterized import parameterized
from conans.client.tools.apple import to_apple_arch
from conans.test.assets.autotools import gen_makefile
from conans.test.assets.sources import gen_function_cpp, gen_function_h
from conans.test.utils.tools import TestClient
@pytest.mark.skipif(platform.system() != "Darwin", reason="requires Xcode")
class AutoToolsAppleTest(unittest.TestCase):
makefile = gen_makefile(apps=["app"], libs=["hello"])
conanfile_py = textwrap.dedent("""
from conans import ConanFile, tools, AutoToolsBuildEnvironment
class App(ConanFile):
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def build(self):
env_build = AutoToolsBuildEnvironment(self)
env_build.make()
""")
@parameterized.expand([("x86_64", "Macos", "10.14"),
("armv8", "iOS", "10.0"),
("armv7", "iOS", "10.0"),
("x86", "iOS", "10.0"),
("x86_64", "iOS", "10.0"),
("armv8", "Macos", "10.14") # M1
])
def test_makefile_arch(self, arch, os_, os_version):
self.arch = arch
self.os = os_
self.os_version = os_version
profile = textwrap.dedent("""
include(default)
[settings]
os = {os}
os.version = {os_version}
arch = {arch}
""").format(os=self.os, arch=self.arch, os_version=self.os_version)
self.t = TestClient()
hello_h = gen_function_h(name="hello")
hello_cpp = gen_function_cpp(name="hello")
main_cpp = gen_function_cpp(name="main", includes=["hello"], calls=["hello"])
self.t.save({"Makefile": self.makefile,
"hello.h": hello_h,
"hello.cpp": hello_cpp,
"app.cpp": main_cpp,
"conanfile.py": self.conanfile_py,
"profile": profile})
self.t.run("install . --profile:host=profile")
self.t.run("build .")
libhello = os.path.join(self.t.current_folder, "libhello.a")
app = os.path.join(self.t.current_folder, "app")
self.assertTrue(os.path.isfile(libhello))
self.assertTrue(os.path.isfile(app))
expected_arch = to_apple_arch(self.arch)
self.t.run_command('lipo -info "%s"' % libhello)
self.assertIn("architecture: %s" % expected_arch, self.t.out)
self.t.run_command('lipo -info "%s"' % app)
self.assertIn("architecture: %s" % expected_arch, self.t.out)
@parameterized.expand([("x86_64",), ("armv8",)])
def test_catalyst(self, arch):
profile = textwrap.dedent("""
include(default)
[settings]
os = Macos
os.version = 12.0
os.sdk = macosx
os.subsystem = catalyst
os.subsystem.ios_version = 13.1
arch = {arch}
""").format(arch=arch)
self.t = TestClient()
hello_h = gen_function_h(name="hello")
hello_cpp = gen_function_cpp(name="hello")
main_cpp = textwrap.dedent("""
#include "hello.h"
#include <TargetConditionals.h>
#include <iostream>
int main()
{
#if TARGET_OS_MACCATALYST
std::cout << "running catalyst " << __IPHONE_OS_VERSION_MIN_REQUIRED << std::endl;
#else
#error "not building for Apple Catalyst"
#endif
}
""")
self.t.save({"Makefile": self.makefile,
"hello.h": hello_h,
"hello.cpp": hello_cpp,
"app.cpp": main_cpp,
"conanfile.py": self.conanfile_py,
"profile": profile})
self.t.run("install . --profile:host=profile")
self.t.run("build .")
libhello = os.path.join(self.t.current_folder, "libhello.a")
app = os.path.join(self.t.current_folder, "app")
self.assertTrue(os.path.isfile(libhello))
self.assertTrue(os.path.isfile(app))
expected_arch = to_apple_arch(arch)
self.t.run_command('lipo -info "%s"' % libhello)
self.assertIn("architecture: %s" % expected_arch, self.t.out)
self.t.run_command('lipo -info "%s"' % app)
self.assertIn("architecture: %s" % expected_arch, self.t.out)
if arch == "x86_64":
self.t.run_command('"%s"' % app)
self.assertIn("running catalyst 130100", self.t.out)
| {
"content_hash": "b2e2320afcb7a4fea5525fc2351ccb2b",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 98,
"avg_line_length": 35.542857142857144,
"alnum_prop": 0.5295418006430869,
"repo_name": "conan-io/conan",
"id": "e68127762a8fe6d845b7a7890eb9ffa6debd22c7",
"size": "4976",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/functional/build_helpers/autotools_apple_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
} |
"""
This module implements a set of languages as collections of features that are
language specific.
feature collections
+++++++++++++++++++
Languages implement a subset of feature collections (e.g.
:class:`~revscoring.languages.features.Dictionary`,
:class:`~revscoring.languages.features.Stopwords`,
:class:`~revscoring.languages.features.Stemmed`,
:class:`~revscoring.languages.features.RegexMatches`, and
:class:`~revscoring.languages.features.SubstringMatches`) based on what
language assets are available. See :mod:`revscoring.languages.features`.
albanian
++++++++
.. automodule:: revscoring.languages.albanian
:members:
arabic
++++++
.. automodule:: revscoring.languages.arabic
:members:
bengali
+++++++
.. automodule:: revscoring.languages.bengali
:members:
bosnian
+++++++
.. automodule:: revscoring.languages.bosnian
:members:
catalan
+++++++
.. automodule:: revscoring.languages.catalan
:members:
chinese
+++++++
.. automodule:: revscoring.languages.chinese
:members:
croatian
++++++++
.. automodule:: revscoring.languages.croatian
:members:
czech
+++++
.. automodule:: revscoring.languages.czech
:members:
dutch
+++++
.. automodule:: revscoring.languages.dutch
:members:
english
+++++++
.. automodule:: revscoring.languages.english
:members:
estonian
++++++++
.. automodule:: revscoring.languages.estonian
:members:
finnish
+++++++
.. automodule:: revscoring.languages.finnish
:members:
french
++++++
.. automodule:: revscoring.languages.french
:members:
galician
++++++++
.. automodule:: revscoring.languages.galician
:members:
german
++++++
.. automodule:: revscoring.languages.german
:members:
greek
+++++++
.. automodule:: revscoring.languages.greek
:members:
hebrew
++++++
.. automodule:: revscoring.languages.hebrew
:members:
hindi
+++++
.. automodule:: revscoring.languages.hindi
:members:
hungarian
+++++++++
.. automodule:: revscoring.languages.hungarian
:members:
icelandic
+++++++++
.. automodule:: revscoring.languages.icelandic
:members:
indonesian
++++++++++
.. automodule:: revscoring.languages.indonesian
:members:
italian
+++++++
.. automodule:: revscoring.languages.italian
:members:
japanese
++++++++
.. automodule:: revscoring.languages.japanese
:members:
korean
++++++
.. automodule:: revscoring.languages.korean
:members:
latvian
+++++++
.. automodule:: revscoring.languages.latvian
:members:
norwegian
+++++++++
.. automodule:: revscoring.languages.norwegian
:members:
persian
+++++++
.. automodule:: revscoring.languages.persian
:members:
polish
++++++
.. automodule:: revscoring.languages.polish
:members:
portuguese
++++++++++
.. automodule:: revscoring.languages.portuguese
:members:
romanian
++++++++
.. automodule:: revscoring.languages.romanian
:members:
russian
+++++++
.. automodule:: revscoring.languages.russian
:members:
spanish
+++++++
.. automodule:: revscoring.languages.spanish
:members:
swedish
+++++++
.. automodule:: revscoring.languages.swedish
:members:
tamil
+++++
.. automodule:: revscoring.languages.tamil
:members:
turkish
+++++++
.. automodule:: revscoring.languages.turkish
:members:
ukrainian
+++++++++
.. automodule:: revscoring.languages.ukrainian
:members:
vietnamese
++++++++++
.. automodule:: revscoring.languages.vietnamese
:members:
"""
| {
"content_hash": "bf43a6c40f732c83c9756a25d3f4ab2c",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 77,
"avg_line_length": 16.975,
"alnum_prop": 0.6765832106038292,
"repo_name": "wiki-ai/revscoring",
"id": "174f81bd0cbe7a43f07ba90bfcbc51eb5a7aca94",
"size": "3395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "revscoring/languages/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "777"
},
{
"name": "Jupyter Notebook",
"bytes": "32675"
},
{
"name": "Python",
"bytes": "957061"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
import pytest
from storops_comptest.utils import setup_fixture
from storops_comptest.vnx import VNXGeneralFixtureManager, \
MultiVNXGeneralFixtureManager
__author__ = 'Cedric Zhuang'
log = logging.getLogger(__name__)
@pytest.fixture(scope='session')
def vnx_gf(request):
""" General fixture for most vnx cases
Details including:
vnx - reference to the system.
pool - A RAID5 pool with 3 disks created on the fly.
lun - A LUN created in the pool.
snap - A snap created upon the LUN.
:param request:
:return:
"""
return setup_fixture(request, VNXGeneralFixtureManager)
@pytest.fixture(scope='session')
def multi_vnx_gf(request):
""" general fixture for multi VNX test cases
Details including:
vnx - reference to the system
sync_mirror - a synchronized mirror
:param request:
:return:
"""
return setup_fixture(request, MultiVNXGeneralFixtureManager)
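# Usage sketch (test name and assertion are illustrative): a test in this
# package declares the fixture as an argument to receive the shared setup:
#
#     def test_lun_is_created(vnx_gf):
#         assert vnx_gf.lun.existed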
| {
"content_hash": "97cb2c67d5bc29614ff48599312ee344",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 64,
"avg_line_length": 24.952380952380953,
"alnum_prop": 0.6698473282442748,
"repo_name": "emc-openstack/storops",
"id": "8aa25ff80f7e23538661eaa4cfc31ce525d81fdd",
"size": "1698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storops_comptest/vnx/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1807840"
},
{
"name": "Shell",
"bytes": "3895"
}
],
"symlink_target": ""
} |
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXCSYMB(gxapi_cy.WrapCSYMB):
"""
GXCSYMB class.
This class is used for generating and modifying colored symbol objects.
Symbol fills are assigned colors based on their Z values and a zone, Aggregate
or `GXITR <geosoft.gxapi.GXITR>` file which defines what colors are associated with different ranges
of Z values. The position of a symbol is defined by its X,Y coordinates.
"""
def __init__(self, handle=0):
super(GXCSYMB, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXCSYMB <geosoft.gxapi.GXCSYMB>`
:returns: A null `GXCSYMB <geosoft.gxapi.GXCSYMB>`
:rtype: GXCSYMB
"""
return GXCSYMB()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
def set_angle(self, angle):
"""
Set the symbol angle.
:param angle: Symbol angle
:type angle: float
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_angle(angle)
def set_base(self, base):
"""
Set base value to subtract from Z values.
:param base: Symbol Base
:type base: float
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_base(base)
def set_dynamic_col(self, att):
"""
Associate symbol edge or fill colors with Z data
and color transform.
:param att: :ref:`CSYMB_COLOR`
:type att: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Use this method after a call to `set_static_col <geosoft.gxapi.GXCSYMB.set_static_col>`. This method
reestablishes the symbol color association with their Z data
values and color transform.
"""
self._set_dynamic_col(att)
def set_fixed(self, fixed):
"""
Set symbol sizing to fixed (or proportionate)
:param fixed: TRUE = Fixed symbol sizing FALSE = Proportionate sizing
:type fixed: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_fixed(fixed)
def set_number(self, number):
"""
Set the symbol number.
:param number: Symbol number (0x1-0x1ffff)
:type number: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** The lower 16 bits of the number is interpreted as UTF-16 with a valid Unicode character
code point. GFN fonts will produce valid symbols depending on the font for 0x01-0x7f and the degree,
plus-minus and diameter symbol (latin small letter o with stroke) for 0xB0, 0xB1 and 0xF8 respectively.
It is possible to check if a character is valid using `GXUNC.is_valid_utf16_char <geosoft.gxapi.GXUNC.is_valid_utf16_char>`. The high 16-bits are reserved
for future use. Also see: `GXUNC.valid_symbol <geosoft.gxapi.GXUNC.valid_symbol>` and `GXUNC.validate_symbols <geosoft.gxapi.GXUNC.validate_symbols>`
"""
self._set_number(number)
def set_scale(self, scale):
"""
Set the symbol scale.
:param scale: Symbol scale (> 0.0)
:type scale: float
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_scale(scale)
def add_data(self, vv_x, vv_y, vv_z):
"""
Add x,y,z data to a color symbol object.
:param vv_x: `GXVV <geosoft.gxapi.GXVV>` for X data
:param vv_y: `GXVV <geosoft.gxapi.GXVV>` for Y data
:param vv_z: `GXVV <geosoft.gxapi.GXVV>` for Z data
:type vv_x: GXVV
:type vv_y: GXVV
:type vv_z: GXVV
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._add_data(vv_x, vv_y, vv_z)
@classmethod
def create(cls, itr):
"""
Create a `GXCSYMB <geosoft.gxapi.GXCSYMB>`.
:param itr: ZON, `GXAGG <geosoft.gxapi.GXAGG>`, or `GXITR <geosoft.gxapi.GXITR>` file name
:type itr: str
:returns: `GXCSYMB <geosoft.gxapi.GXCSYMB>` handle
:rtype: GXCSYMB
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapCSYMB._create(GXContext._get_tls_geo(), itr.encode())
return GXCSYMB(ret_val)
def get_itr(self, itr):
"""
Get the `GXITR <geosoft.gxapi.GXITR>` of the `GXCSYMB <geosoft.gxapi.GXCSYMB>`
:param itr: `GXITR <geosoft.gxapi.GXITR>` object
:type itr: GXITR
.. versionadded:: 9.3
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._get_itr(itr)
def set_font(self, font, geo_font, weight, italic):
"""
Set the symbol font name.
:param font: Font name
:param geo_font: Geosoft font? (TRUE or FALSE)
:param weight: :ref:`MVIEW_FONT_WEIGHT`
:param italic: Italics? (TRUE or FALSE)
:type font: str
:type geo_font: int
:type weight: int
:type italic: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_font(font.encode(), geo_font, weight, italic)
def set_static_col(self, col, att):
"""
Set a static color for the symbol edge or fill.
:param col: Color value
:param att: :ref:`CSYMB_COLOR`
:type col: int
:type att: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Use this method to set a STATIC color for symbol edge or fill.
By default, both edge and fill colors vary according to their
Z data values and a color transform.
"""
self._set_static_col(col, att)
def get_stat(self, st):
"""
Get the `GXST <geosoft.gxapi.GXST>` of the `GXCSYMB <geosoft.gxapi.GXCSYMB>`
:param st: `GXST <geosoft.gxapi.GXST>` object
:type st: GXST
.. versionadded:: 2021.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._get_stat(st)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
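# A minimal usage sketch (the ITR file name and values are placeholders,
# and vv_x/vv_y/vv_z are pre-built GXVV instances):
#
# csymb = GXCSYMB.create('colors.itr')
# csymb.set_number(0xB0)  # degree symbol, per the set_number note above
# csymb.set_scale(1.5)
# csymb.set_fixed(1)  # fixed rather than proportionate sizing
# csymb.add_data(vv_x, vv_y, vv_z)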
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer | {
"content_hash": "fc86b3c003b4a7749cc5e84d5b024ae9",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 162,
"avg_line_length": 28.286195286195287,
"alnum_prop": 0.5946911082014046,
"repo_name": "GeosoftInc/gxpy",
"id": "b927a5dee193481c90d907f738cfa0f802d8e337",
"size": "8511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geosoft/gxapi/GXCSYMB.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4799134"
}
],
"symlink_target": ""
} |
"""A class to generate random BidRequest protocol buffers."""
import base64
import random
import time
import realtime_bidding_pb2
PROTOCOL_VERSION = 1
BID_REQUEST_ID_LENGTH = 16 # In bytes.
COOKIE_LENGTH = 20 # In bytes.
COOKIE_VERSION = 1
# Placement.
CHANNELS = ['12345']
# Data describing branded publishers.
# Tuple description: (publisher url, seller id, publisher settings id, seller)
# The below are example values for these fields that are used to populate
# publisher info.
BRANDED_PUB_DATA = [
('http://www.youtube.com', 502, 32423234, 'Youtube'),
('http://www.youtube.com/shows', 502, 32423234, 'Youtube'),
('http://news.google.com', 10001, 56751341, 'Google News'),
('http://news.google.com/news?pz=1&ned=us&topic=b&ict=ln', 10001, 12672383,
'Google News'),
('http://www.google.com/finance?hl=en&ned=us&tab=ne', 1528, 84485234,
'Google Finance'),
('http://www.nytimes.com/pages/technology/index.html', 936, 9034124,
'New York Times'),
('http://some.gcn.site.com', 10002, 12002392, 'GCN'),
]
# Data for anonymous publishers.
# Tuple description: (anonymous url, publisher settings id)
ANONYMOUS_PUB_DATA = [
('http://1.google.anonymous/', 90002301),
('http://2.google.anonymous/', 90002302),
('http://3.google.anonymous/', 90002303),
('http://4.google.anonymous/', 93002304),
('http://5.google.anonymous/', 93002305),
]
MAX_ADGROUP_ID = 99999999
MAX_DIRECT_DEAL_ID = 1 << 62
MAX_MATCHING_ADGROUPS = 3
DIMENSIONS = [
(468, 60),
(120, 600),
(728, 90),
(300, 250),
(250, 250),
(336, 280),
(120, 240),
(125, 125),
(160, 600),
(180, 150),
(110, 32),
(120, 60),
(180, 60),
(420, 600),
(420, 200),
(234, 60),
(200, 200),
]
MAX_SLOT_ID = 200
# Verticals.
MAX_NUM_VERTICALS = 5
VERTICALS = [
66, 563, 607, 379, 380, 119, 570, 22, 355, 608, 540, 565, 474, 433, 609,
23, 24,
]
# Geo.
LANGUAGE_CODES = ['en']
# Example geo targets used to populate requests.
# Tuple description (geo_criteria_id, postal code, postal code prefix)
# Only one of postal code or postal code prefix will be set.
# Canada has only postal code prefixes available.
GEO_CRITERIA = [
(9005559, '10116', None), # New York, United States
(9031936, '94087', None), # California, United States
(1015214, '33601', None), # Tampa, Florida, United States
(1021337, '27583', None), # Timberlake, North Carolina, United States
(1012873, '99501', None), # Anchorage, Alaska, United States
(1018127, '02102', None), # Boston, Massachusetts, United States
(1002451, None, 'M4C'), # Toronto, Ontario, Canada
(1002113, None, 'B3H'), # Halifax, Nova Scotia, Canada
(1002061, None, 'E1B'), # Moncton, New Brunswick, Canada
(1000278, '2753', None), # Richmond, New South Wales, Australia
(1000142, '2600', None), # Canberra, Australian Capital Territory,Australia
(1000414, '4810', None), # Townsville, Queensland, Australia
(1000567, '3000', None), # Melbourne, Victoria, Australia
]
# User info.
USER_AGENTS = [
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.2) '
'Gecko/2008092313 Ubuntu/8.04 (hardy) Firefox/3.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) '
'Gecko/20070118 Firefox/2.0.0.2pre',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.7pre) Gecko/20070815 '
'Firefox/2.0.0.6 Navigator/9.0b3',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_4_11; en) AppleWebKit/528.5+'
' (KHTML, like Gecko) Version/4.0 Safari/528.1',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; sv-se) AppleWebKit/419 '
'(KHTML, like Gecko) Safari/419.3',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0;)',
'Mozilla/4.08 (compatible; MSIE 6.0; Windows NT 5.1)',
]
# Criteria.
MAX_EXCLUDED_ATTRIBUTES = 3
CREATIVE_ATTRIBUTES = [1, 2, 3, 4, 5, 6, 7, 8, 9]
MAX_EXCLUDED_BUYER_NETWORKS = 2
MAX_INCLUDED_VENDOR_TYPES = 10
VENDOR_TYPES = [
0, 10, 28, 42, 51, 65, 71, 92, 94, 113, 126, 128, 129, 130, 143, 144, 145,
146, 147, 148, 149, 152, 179, 194, 198, 225, 226, 227, 228, 229, 230, 231,
232, 233, 234, 235, 236, 237, 238, 255, 303, 311, 312, 313, 314, 315, 316,
]
INSTREAM_VIDEO_VENDOR_TYPES = [297, 220, 306, 307, 308, 309, 310, 317, 318,]
MAX_EXCLUDED_CATEGORIES = 4
AD_CATEGORIES = [0, 3, 4, 5, 7, 8, 10, 18, 19, 23, 24, 25,]
MAX_TARGETABLE_CHANNELS = 3
TARGETABLE_CHANNELS = [
'all top banner ads', 'right hand side banner', 'sports section',
'user generated comments', 'weather and news',
]
# Mobile constants.
DEFAULT_MOBILE_PROPORTION = 0.2
# Identifiers for mobile carriers that devices use to connect to the internet.
# See https://developers.google.com/adwords/api/docs/appendix/mobilecarriers
MOBILE_CARRIERS = [70152, 70361, 70392, 71352]
# Device type constants.
PHONE = realtime_bidding_pb2.BidRequest.Mobile.HIGHEND_PHONE
TABLET = realtime_bidding_pb2.BidRequest.Mobile.TABLET
# Screen Orientation Constants.
PORTRAIT = realtime_bidding_pb2.BidRequest.Mobile.SCREEN_ORIENTATION_PORTRAIT
LANDSCAPE = realtime_bidding_pb2.BidRequest.Mobile.SCREEN_ORIENTATION_LANDSCAPE
# These category ids represent Google play store and iTunes app store ids that
# the mobile app belongs to. Its not set for mobile web requests. See
# https://developers.google.com/adwords/api/docs/appendix/mobileappcategories
MOBILE_ANDROID_CATEGORY_IDS = [60005, 60025, 60032, 60004, 60002]
MOBILE_IOS_CATEGORY_IDS = [60535, 60508, 60548, 60556, 60564]
NUM_CATEGORIES = len(MOBILE_ANDROID_CATEGORY_IDS)
assert NUM_CATEGORIES == len(MOBILE_IOS_CATEGORY_IDS)
# Mobile device specific field collection where each row describes a device.
# Tuple description (platform, os major version, os minor version, os micro
# version, device type, is_app, is_interstitial, screen orientation, ad slot
# width, ad slot height, user agent)
# is_app: flag that indicates requests from app when set, mobile web requests
# when unset.
# is_interstitial: indicates interstitial app requests.
MOBILE_DEVICE_INFO = [
('iphone', 6, 1, 2, PHONE, True, False, PORTRAIT, 320, 50,
'Mozilla/5.0 (iPhone; CPU iPhone OS 6_1_2 like Mac OS X) AppleWebKit/'
'536.26 (KHTML, like Gecko) Mobile/10B146,gzip(gfe)'),
('android', 2, 3, 6, PHONE, True, False, LANDSCAPE, 320, 50,
'Mozilla/5.0 (Linux; U; Android 2.3.6; it-it; GT-S5570I Build/GINGERBREAD)'
' AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1 '
'(Mobile; afma-sdk-a-v6.1.0),gzip(gfe)'),
('android', 4, 1, 1, TABLET, True, False, LANDSCAPE, 728, 90,
'Mozilla/5.0 (Linux; U; Android 4.1.1; fr-ca; GT-P3113 Build/JRO03C) '
'AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30 (Mobile;'
' afma-sdk-a-v6.1.0),gzip(gfe)'),
('ipad', 6, 1, 2, TABLET, True, True, LANDSCAPE, 768, 1024,
'Mozilla/5.0 (iPad; CPU OS 6_1_2 like Mac OS X) AppleWebKit/536.26'
' (KHTML, like Gecko) Mobile/10B146,gzip(gfe)'),
('android', 4, 0, 4, PHONE, True, True, PORTRAIT, 360, 640,
'Mozilla/5.0 (Linux; U; Android 4.0.4; en-us; DROID BIONIC '
'Build/6.7.2-223_DBN_M4-23) AppleWebKit/534.30 (KHTML, like Gecko) '
'Version/4.0 Mobile Safari/534.30 (Mobile; afma-sdk-a-v6.2.1),gzip(gfe)'),
('ipad', 5, 1, 1, TABLET, True, False, PORTRAIT, 468, 60,
'Mozilla/5.0 (iPad; CPU OS 5_1_1 like Mac OS X) AppleWebKit/534.46 (KHTML,'
'like Gecko) Mobile/9B206,gzip(gfe)'),
('android', 2, 3, 5, PHONE, False, False, 0, 728, 90,
'Mozilla/5.0 (Linux; U; Android 2.3.5; en-us; DROID X2 Build/'
'4.5.1A-DTN-200-18) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 '
'Mobile Safari/533.1,gzip(gfe)'),
('iphone', 4, 2, 1, PHONE, False, False, 0, 728, 90,
'Mozilla/5.0 (iPod; U; CPU iPhone OS 4_2_1 like Mac OS X; en-us) '
'AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 '
'Safari/6533.18.5,gzip(gfe)'),
('blackberry', 9, 2, 20, PHONE, False, False, 0, 320, 50,
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9220; en) AppleWebKit/534.11+ '
'(KHTML, like Gecko) Version/7.1.0.337 Mobile Safari/534.11+,gzip(gfe)')
]
NUM_MOBILE_DEVICES = len(MOBILE_DEVICE_INFO)
ANDROID_APP_IDS = ['com.foo.bar', 'fus.ro.dah', 'test.app.id', 'a.b.c',
'com.one.two']
IOS_APP_IDS = ['610434022', '4453712097', '530434022', '445275396', '610424031']
MOBILE_VENDOR_TYPES = [423, 534]
MAX_INCLUDED_VENDOR_TYPES = 10
DEFAULT_INSTREAM_VIDEO_PROPORTION = 0.1
INSTREAM_VIDEO_START_DELAY_MAX_SECONDS = 60
INSTREAM_VIDEO_DURATION_MAX_SECONDS = 60
# Types of invideo_requests.
INSTREAM_VIDEO_PREROLL = 0
INSTREAM_VIDEO_MIDROLL = 1
INSTREAM_VIDEO_POSTROLL = -1
INSTREAM_VIDEO_TYPES = [
INSTREAM_VIDEO_PREROLL, INSTREAM_VIDEO_MIDROLL, INSTREAM_VIDEO_POSTROLL]
random.seed(time.time())
class RandomBidGeneratorWrapper(object):
"""Generates random BidRequests."""
def __init__(self, google_id_list=None,
instream_video_proportion=DEFAULT_INSTREAM_VIDEO_PROPORTION,
mobile_proportion=DEFAULT_MOBILE_PROPORTION,
adgroup_ids_list=None):
"""Constructs a new RandomBidGenerator.
Args:
google_id_list: A list of Google IDs (as strings), or None to randomly
generate IDs.
instream_video_proportion: The proportion of requests which are for
instream video ads [0.0, 1.0].
mobile_proportion: Fraction of requests that are from a mobile device.
adgroup_ids_list: A list of AdGroup IDs (as ints), or None to randomly
generate IDs.
"""
self._instream_video_proportion = instream_video_proportion
self._mobile_proportion = mobile_proportion
self._default_bid_generator = DefaultBidGenerator(google_id_list,
adgroup_ids_list)
self._mobile_bid_generator = MobileBidGenerator(google_id_list,
adgroup_ids_list)
self._video_bid_generator = VideoBidGenerator(google_id_list,
adgroup_ids_list)
def GenerateBidRequest(self):
"""Generates a random BidRequest.
Returns:
An instance of realtime_bidding_pb2.BidRequest.
"""
random_number = random.random()
if random_number < self._instream_video_proportion:
return self._video_bid_generator.GenerateBidRequest()
elif random_number < (self._instream_video_proportion
+ self._mobile_proportion):
return self._mobile_bid_generator.GenerateBidRequest()
# else
return self._default_bid_generator.GenerateBidRequest()
def GeneratePingRequest(self):
"""Generates a special ping request.
Returns:
A ping request with a generated id.
"""
return self._default_bid_generator.GeneratePingRequest()
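# A minimal usage sketch (the proportions are illustrative values): about
# 10% of generated requests will be instream video and 20% mobile; the
# remainder come from the default generator.
#
# generator = RandomBidGeneratorWrapper(
#     instream_video_proportion=0.1, mobile_proportion=0.2)
# bid_request = generator.GenerateBidRequest()
# ping_request = generator.GeneratePingRequest()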
class DefaultBidGenerator(object):
"""Base bid request generator."""
def __init__(self, google_id_list=None, adgroup_ids_list=None):
"""Constructor for the base generator.
Args:
google_id_list: A list of Google IDs (as strings), or None to randomly
generate IDs.
adgroup_ids_list: A list of AdGroup IDs (as ints), or None to randomly
generate IDs.
"""
self._google_id_list = google_id_list
if adgroup_ids_list is not None:
self._adgroup_ids = set(adgroup_ids_list)
else:
self._adgroup_ids = None
self._vendor_types = VENDOR_TYPES
self._slot_width, self._slot_height = random.choice(DIMENSIONS)
self._user_agent_list = USER_AGENTS
def GenerateBidRequest(self):
"""Generates a random BidRequest.
Returns:
An instance of realtime_bidding_pb2.BidRequest.
"""
bid_request = realtime_bidding_pb2.BidRequest()
bid_request.is_test = True
bid_request.id = self._GenerateId(BID_REQUEST_ID_LENGTH)
bid_request.user_agent = random.choice(self._user_agent_list)
self._GeneratePageInfo(bid_request)
self._GenerateUserInfo(bid_request)
self._GenerateAdSlot(bid_request)
return bid_request
def GeneratePingRequest(self):
"""Generates a special ping request.
A ping request only has the id and is_ping fields set.
Returns:
An instance of realtime_bidding_pb2.BidRequest.
"""
bid_request = realtime_bidding_pb2.BidRequest()
bid_request.id = self._GenerateId(BID_REQUEST_ID_LENGTH)
bid_request.is_ping = True
return bid_request
def _GenerateId(self, length):
"""Generates a random ID.
The generated ID is not guaranteed to be unique.
Args:
length: Length of generated ID in bytes.
Returns:
A random ID of the given length.
"""
random_id = ''
for _ in range(length):
random_id += chr(random.randint(0, 255))
return random_id
def _GeneratePublisherData(self, bid_request):
"""Generates publisher fields.
A random decision is made to choose between anonymous and branded data.
Args:
bid_request: a realtime_bidding_pb2.BidRequest instance
"""
# 50% chance of anonymous ID/branded URL.
if random.choice([True, False]):
url, seller_id, pub_id, seller = random.choice(BRANDED_PUB_DATA)
bid_request.url = url
bid_request.seller_network_id = seller_id
bid_request.publisher_settings_list_id = pub_id
bid_request.DEPRECATED_seller_network = seller
else:
anonymous_id, pub_id = random.choice(ANONYMOUS_PUB_DATA)
bid_request.anonymous_id = anonymous_id
bid_request.publisher_settings_list_id = pub_id
def _GeneratePageInfo(self, bid_request):
"""Generates page information for the given bid_request.
Args:
bid_request: a realtime_bidding_pb2.BidRequest instance
"""
self._GeneratePublisherData(bid_request)
bid_request.detected_language.append(random.choice(LANGUAGE_CODES))
self._GenerateVerticals(bid_request)
def _GenerateAdSlot(self, bid_request):
"""Generates a single ad slot with random data.
Args:
bid_request: a realtime_bidding_pb2.BidRequest instance
"""
ad_slot = bid_request.adslot.add()
ad_slot.id = random.randint(1, MAX_SLOT_ID)
if self._slot_width is not None:
ad_slot.width.append(self._slot_width)
if self._slot_height is not None:
ad_slot.height.append(self._slot_height)
num_included_vendor_types = random.randint(1, MAX_INCLUDED_VENDOR_TYPES)
for allowed_vendor in self._GenerateSet(self._vendor_types,
num_included_vendor_types):
ad_slot.allowed_vendor_type.append(allowed_vendor)
# Generate random excluded creative attributes.
num_excluded_creative_attributes = random.randint(1,
MAX_EXCLUDED_ATTRIBUTES)
for creative_attribute in self._GenerateSet(
CREATIVE_ATTRIBUTES, num_excluded_creative_attributes):
ad_slot.excluded_attribute.append(creative_attribute)
# Generate excluded categories for 20% of requests.
if random.random() < 0.2:
num_excluded_categories = random.randint(1, MAX_EXCLUDED_CATEGORIES)
for excluded_category in self._GenerateSet(AD_CATEGORIES,
num_excluded_categories):
ad_slot.excluded_sensitive_category.append(excluded_category)
# Generate ad slot publisher settings list id by combining bid request
# pub settings id and slot id.
ad_slot.publisher_settings_list_id = (bid_request.publisher_settings_list_id
+ ad_slot.id)
# We generate channels only for branded sites, simplifying by using the
# same list of channels for all publishers.
if bid_request.HasField('seller_network_id'):
# Send only for 10% of bid requests, to simulate that few bid requests
# have targetable channels in reality.
send_channels = random.random() < 0.1
if send_channels:
num_targetable_channels = random.randint(1, MAX_TARGETABLE_CHANNELS)
for channel in self._GenerateSet(TARGETABLE_CHANNELS,
num_targetable_channels):
ad_slot.targetable_channel.append(channel)
# Generate adgroup IDs, either randomly or from the ID list parameter
if self._adgroup_ids:
num_matching_adgroups = random.randint(1, len(self._adgroup_ids))
generated_ids = random.sample(self._adgroup_ids, num_matching_adgroups)
else:
num_matching_adgroups = random.randint(1, MAX_MATCHING_ADGROUPS)
generated_ids = [random.randint(1, MAX_ADGROUP_ID)
for _ in range(num_matching_adgroups)]
for generated_id in generated_ids:
ad_data = ad_slot.matching_ad_data.add()
ad_data.adgroup_id = generated_id
ad_data.minimum_cpm_micros = random.randint(1, 99) * 10000
# 10% of adgroup requests will have a direct deal enabled
if random.random() < 0.10:
direct_deal = ad_data.direct_deal.add()
direct_deal.direct_deal_id = random.randint(1, MAX_DIRECT_DEAL_ID)
direct_deal.fixed_cpm_micros = ad_data.minimum_cpm_micros
def _GenerateVerticals(self, bid_request):
"""Populates bid_request with random verticals.
Args:
bid_request: a realtime_bidding_pb2.BidRequest instance.
"""
verticals = self._GenerateSet(VERTICALS, MAX_NUM_VERTICALS)
for vertical in verticals:
vertical_pb = bid_request.detected_vertical.add()
vertical_pb.id = vertical
vertical_pb.weight = random.random()
def _GenerateGoogleID(self, bid_request):
"""Generates the google id field.
If the RandomBidGenerator was initated with a list of Google IDs, one of
these is picked at random, otherwise a random ID is generated.
Args:
bid_request: A realtime_bidding_pb2.BidRequest instance.
"""
if self._google_id_list:
bid_request.google_user_id = random.choice(self._google_id_list)
else:
hashed_cookie = self._GenerateId(COOKIE_LENGTH)
google_user_id = base64.urlsafe_b64encode(hashed_cookie)
# Remove padding, i.e. remove '='s off the end.
bid_request.google_user_id = google_user_id[:google_user_id.find('=')]
# Cookie age of [1 second, 30 days).
bid_request.cookie_age_seconds = random.randint(1, 60*60*24*30)
def _GenerateUserInfo(self, bid_request):
"""Generates random user information.
Args:
bid_request: a realtime_bidding_pb2.BidRequest instance
"""
geo_id, postal, postal_prefix = random.choice(GEO_CRITERIA)
bid_request.geo_criteria_id = geo_id
if postal:
bid_request.postal_code = postal
elif postal_prefix:
bid_request.postal_code_prefix = postal_prefix
self._GenerateGoogleID(bid_request)
bid_request.cookie_version = COOKIE_VERSION
# 4 bytes in IPv4, but last byte is truncated giving an overall length of 3
# bytes.
ip = self._GenerateId(3)
bid_request.ip = ip
def _GenerateSet(self, collection, set_size):
"""Generates a set of randomly chosen elements from the given collection.
Args:
collection: a list-like collection of elements
set_size: the size of set to generate
Returns:
A set of randomly chosen elements from the given collection.
"""
unique_collection = set(collection)
if len(unique_collection) < set_size:
return unique_collection
s = set()
while len(s) < set_size:
s.add(random.choice(collection))
return s
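# For example (illustrative call), self._GenerateSet([1, 2, 3, 4], 2) yields
# a random two-element subset such as set([1, 3]); when set_size exceeds the
# number of unique elements, the whole collection is returned as a set.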
class VideoBidGenerator(DefaultBidGenerator):
"""Video bid request generator."""
def __init__(self, google_id_list=None, adgroup_ids_list=None):
"""Constructor for the video request generator.
Args:
google_id_list: A list of Google IDs (as strings), or None to randomly
generate IDs.
adgroup_ids_list: A list of AdGroup IDs (as ints), or None to randomly
generate IDs.
"""
DefaultBidGenerator.__init__(self, google_id_list, adgroup_ids_list)
self._slot_width = None
self._slot_height = None
self._vendor_types = INSTREAM_VIDEO_VENDOR_TYPES
def _GeneratePageInfo(self, bid_request):
"""Generates page information for the given video bid request.
Args:
bid_request: a realtime_bidding_pb2.BidRequest instance
"""
# Call the base class page info generate method.
super(VideoBidGenerator, self)._GeneratePageInfo(bid_request)
# Add video specific fields.
video = bid_request.video
request_type = random.choice(INSTREAM_VIDEO_TYPES)
if request_type == INSTREAM_VIDEO_MIDROLL:
delay_seconds = random.randint(1,
INSTREAM_VIDEO_START_DELAY_MAX_SECONDS)
video.videoad_start_delay = delay_seconds * 1000 # In milliseconds.
else:
video.videoad_start_delay = request_type
# 50% chance of setting max_ad_duration.
if random.choice([True, False]):
max_ad_duration_seconds = random.randint(
1, INSTREAM_VIDEO_DURATION_MAX_SECONDS)
# In milliseconds.
video.max_ad_duration = max_ad_duration_seconds * 1000
class MobileBidGenerator(DefaultBidGenerator):
"""Mobile bid request generator."""
def __init__(self, google_id_list=None, adgroup_ids_list=None):
"""Constructor for the mobile request generator.
Args:
google_id_list: A list of Google IDs (as strings), or None to randomly
generate IDs.
adgroup_ids_list: A list of AdGroup IDs (as ints), or None to randomly
generate IDs.
"""
DefaultBidGenerator.__init__(self, google_id_list, adgroup_ids_list)
self._slot_width = None
self._slot_height = None
self._vendor_types = MOBILE_VENDOR_TYPES
def GenerateBidRequest(self):
"""Generates a random BidRequest.
Randomly picks device info from available set, sets user agent and screen
sizes before calling parent generate bid request.
Returns:
An instance of realtime_bidding_pb2.BidRequest.
"""
bid_request = realtime_bidding_pb2.BidRequest()
bid_request.is_test = True
bid_request.id = self._GenerateId(BID_REQUEST_ID_LENGTH)
self._GeneratePageInfo(bid_request)
# Pick a mobile device at random.
(platform, os_major_version, os_minor_version, os_micro_version,
device_type, is_app_request, is_interstitial, orientation,
self._slot_width, self._slot_height,
bid_request.user_agent) = random.choice(MOBILE_DEVICE_INFO)
# Add mobile fields
mobile = bid_request.mobile
mobile.carrier_id = random.choice(MOBILE_CARRIERS)
mobile.platform = platform
mobile.os_version.os_version_major = os_major_version
mobile.os_version.os_version_minor = os_minor_version
mobile.os_version.os_version_micro = os_micro_version
mobile.mobile_device_type = device_type
mobile.is_app = is_app_request
mobile.is_interstitial_request = is_interstitial
mobile.screen_orientation = orientation
if is_app_request:
category_ids = None
if platform == 'android':
category_ids = self._GenerateSet(MOBILE_ANDROID_CATEGORY_IDS,
random.randint(1, NUM_CATEGORIES))
mobile.app_id = random.choice(ANDROID_APP_IDS)
else:
category_ids = self._GenerateSet(MOBILE_IOS_CATEGORY_IDS,
random.randint(1, NUM_CATEGORIES))
mobile.app_id = random.choice(IOS_APP_IDS)
for category_id in category_ids:
mobile.app_category_ids.append(category_id)
self._GenerateUserInfo(bid_request)
self._GenerateAdSlot(bid_request)
return bid_request
| {
"content_hash": "9b175ba48e7d5080362fdb6e9cae6953",
"timestamp": "",
"source": "github",
"line_count": 614,
"max_line_length": 80,
"avg_line_length": 38.48208469055375,
"alnum_prop": 0.6634501438970712,
"repo_name": "rickiepark/openbidder",
"id": "f7767f87c3e54a6c1fc12469bc06623a66c43b45",
"size": "23696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protobuf/requester/generator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4099"
},
{
"name": "C",
"bytes": "13010"
},
{
"name": "C++",
"bytes": "7349667"
},
{
"name": "CMake",
"bytes": "18586"
},
{
"name": "Emacs Lisp",
"bytes": "7798"
},
{
"name": "Java",
"bytes": "1560275"
},
{
"name": "Jupyter Notebook",
"bytes": "70074"
},
{
"name": "M4",
"bytes": "45813"
},
{
"name": "Makefile",
"bytes": "984192"
},
{
"name": "Objective-C",
"bytes": "2324"
},
{
"name": "Protocol Buffer",
"bytes": "535635"
},
{
"name": "Python",
"bytes": "1462065"
},
{
"name": "Shell",
"bytes": "1464001"
},
{
"name": "VimL",
"bytes": "3744"
}
],
"symlink_target": ""
} |
import math
from pandac.PandaModules import *
from direct.interval.MetaInterval import Sequence, Parallel
from direct.interval.FunctionInterval import Func
from direct.interval.LerpInterval import LerpScaleInterval, LerpColorScaleInterval
from direct.showbase.PythonUtil import bound
from toontown.toon import ToonHead
from toontown.minigame.CannonGameGlobals import *
from toontown.toonbase import ToontownGlobals
from toontown.parties.PartyUtils import toRadians, calcVelocity
from direct.showbase.PythonUtil import StackTrace
from otp.nametag.NametagFloat3d import NametagFloat3d
from otp.nametag.Nametag import Nametag
CANNON_ROTATION_MIN = -70
CANNON_ROTATION_MAX = 70
INITIAL_VELOCITY = 80.0
CANNON_BARREL_TOONHEAD_Y = 6.0
class Cannon:
notify = directNotify.newCategory('DistributedPartyCannon')
def __init__(self, parent, pos = Point3(0, 0, 0)):
self.__previousRotation = 0.0
self.__previousAngle = 0.0
self._rotation = 0.0
self._angle = 0.0
self._position = pos
self._parent = parent
self.parentNode = None
self.cannonNode = None
self.barrelNode = None
self.sndCannonMove = None
self.sndCannonFire = None
self.collSphere = None
self.collNode = None
self.collNodePath = None
self.toonInside = None
self.toonHead = None
self.toonOriginalScale = 0.0
self.toonParentNode = None
return
def reset(self):
self.setRotation(0)
self.setAngle((CANNON_ANGLE_MIN + CANNON_ANGLE_MAX) * 0.5)
self.parentNode.setPos(self._position)
self.updateModel()
def load(self, nodeName):
self.parentNode = NodePath(nodeName)
self.cannonNode = loader.loadModel('phase_4/models/minigames/toon_cannon')
self.cannonNode.reparentTo(self.parentNode)
self.barrelNode = self.cannonNode.find('**/cannon')
self.shadowNode = self.cannonNode.find('**/square_drop_shadow')
self.smokeNode = loader.loadModel('phase_4/models/props/test_clouds')
self.smokeNode.setBillboardPointEye()
self.sndCannonMove = base.loadSfx('phase_4/audio/sfx/MG_cannon_adjust.ogg')
self.sndCannonFire = base.loadSfx('phase_4/audio/sfx/MG_cannon_fire_alt.ogg')
self.collSphere = CollisionSphere(0, 0, 0, self.getSphereRadius())
self.collSphere.setTangible(1)
self.collNode = CollisionNode(self.getCollisionName())
self.collNode.setCollideMask(ToontownGlobals.WallBitmask)
self.collNode.addSolid(self.collSphere)
self.collNodePath = self.parentNode.attachNewNode(self.collNode)
def unload(self):
self.__cleanupToonInside()
if self.cannonNode:
self.cannonNode.removeNode()
self.cannonNode = None
if self.smokeNode:
self.smokeNode.removeNode()
self.smokeNode = None
del self.sndCannonMove
del self.sndCannonFire
del self._position
self.ignoreAll()
return
def updateModel(self, rotation = None, angle = None):
if rotation is not None:
self.setRotation(rotation)
if angle is not None:
self.setAngle(angle)
self.cannonNode.setHpr(self._rotation, 0, 0)
self.barrelNode.setHpr(0, self._angle, 0)
maxP = 90
newP = self.barrelNode.getP()
yScale = 1 - 0.5 * float(newP) / maxP
self.shadowNode.setScale(1, yScale, 1)
def playFireSequence(self):
self.smokeNode.reparentTo(self.barrelNode)
self.smokeNode.setPos(0, 6, -3)
self.smokeNode.setScale(0.5)
self.smokeNode.wrtReparentTo(render)
track = Sequence(Parallel(LerpScaleInterval(self.smokeNode, 0.5, 3), LerpColorScaleInterval(self.smokeNode, 0.5, Vec4(2, 2, 2, 0))), Func(self.smokeNode.reparentTo, hidden), Func(self.smokeNode.clearColorScale))
base.playSfx(self.sndCannonFire)
track.start()
def loopMovingSound(self):
base.playSfx(self.sndCannonMove, looping=1)
def stopMovingSound(self):
self.sndCannonMove.stop()
def show(self):
self.reset()
self.parentNode.reparentTo(self._parent)
def hide(self):
self.parentNode.reparentTo(hidden)
def placeToonInside(self, toon):
self.__setToonInside(toon)
self.__createToonHead(toon)
self.__placeToon()
def __setToonInside(self, toon):
self.toonInside = toon
toonName = 'None'
if toon:
toonName = toon.getName()
self.notify.debug('__setToonInside self.toonInside=%s\nstack=%s' % (toonName, StackTrace().compact()))
self.toonInside.stopSmooth()
self.toonOriginalScale = toon.getScale()
toon.useLOD(1000)
self.toonParentNode = render.attachNewNode('toonOriginChange')
self.toonInside.wrtReparentTo(self.toonParentNode)
self.toonInside.setPosHpr(0, 0, -(self.toonInside.getHeight() / 2.0), 0, -90, 0)
def __createToonHead(self, toon):
self.toonHead = ToonHead.ToonHead()
self.toonHead.setupHead(toon.style)
self.toonHead.reparentTo(hidden)
tag = NametagFloat3d()
tag.setContents(Nametag.CSpeech | Nametag.CThought)
tag.setBillboardOffset(0)
tag.setAvatar(self.toonHead)
toon.nametag.addNametag(tag)
tagPath = self.toonHead.attachNewNode(tag)
tagPath.setPos(0, 0, 1)
self.toonHead.tag = tag
def __placeToon(self):
self.showToonHead()
self.toonHead.setPosHpr(0, CANNON_BARREL_TOONHEAD_Y, 0, 0, -45, 0)
scale = self.toonOriginalScale
self.toonHead.setScale(render, scale[0], scale[1], scale[2])
self.toonParentNode.reparentTo(hidden)
self.toonParentNode.setPos(self.getToonFirePos())
def showToonHead(self):
if self.toonHead:
self.toonHead.startBlink()
self.toonHead.startLookAround()
self.toonHead.reparentTo(self.barrelNode)
def hideToonHead(self):
if self.toonHead:
self.toonHead.stopBlink()
self.toonHead.stopLookAroundNow()
self.toonHead.reparentTo(hidden)
def removeToonReadyToFire(self):
toon = self.toonInside
self.toonInside.wrtReparentTo(render)
y = self.toonHead.getY(self.barrelNode) - (self.toonInside.getHeight() - self.toonHead.getHeight())
self.toonInside.setPosHpr(self.barrelNode, 0, y, 0, 0, -90, 0)
return self.__removeToon()
def removeToonDidNotFire(self):
self.toonInside.reparentTo(self.cannonNode)
self.toonInside.setPos(2, -4, 0)
self.toonInside.wrtReparentTo(render)
return self.__removeToon()
def __removeToon(self):
self.stopMovingSound()
toonNode = self.toonParentNode
self.toonInside.resetLOD()
self.hideToonHead()
self.__cleanupToonInside()
return toonNode
def __cleanupToonInside(self):
toonName = 'None'
if self.toonInside:
toonName = self.toonInside.getName()
self.notify.debug('__cleanupToonInside self.toonInside=%s\nstack=%s' % (toonName, StackTrace().compact()))
if self.toonHead != None:
self.hideToonHead()
if hasattr(self.toonInside, 'nametag'):
self.toonInside.nametag.removeNametag(self.toonHead.tag)
self.toonHead.delete()
self.toonHead = None
self.toonInside = None
self.toonParentNode = None
return
def getSphereRadius(self):
return 1.5
def getCollisionName(self):
return self.parentNode.getName() + 'Collision'
def getEnterCollisionName(self):
return 'enter' + self.getCollisionName()
def isToonInside(self):
return self.toonHead != None
def getToonInside(self):
return self.toonInside
def setRotation(self, rotation):
self.__previousRotation = self._rotation
self._rotation = bound(rotation, CANNON_ROTATION_MIN, CANNON_ROTATION_MAX)
def getRotation(self):
return self._rotation
def setAngle(self, angle):
self.__previousAngle = self._angle
self._angle = bound(angle, CANNON_ANGLE_MIN, CANNON_ANGLE_MAX)
def getAngle(self):
return self._angle
def hasMoved(self):
return self.__previousRotation != self._rotation or self.__previousAngle != self._angle
def getBarrelHpr(self, node):
return self.barrelNode.getHpr(node)
def getToonFirePos(self):
if self.toonHead:
return self.toonHead.getPos(render)
return Point3.zero()
def getToonFireHpr(self):
if self.toonHead:
return self.toonHead.getHpr(render)
return Point3.zero()
def getToonFireVel(self):
hpr = self.barrelNode.getHpr(render)
rotation = toRadians(hpr[0])
angle = toRadians(hpr[1])
return calcVelocity(rotation, angle, initialVelocity=INITIAL_VELOCITY)
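# A minimal usage sketch (node name and aim values are illustrative, and
# 'toon' is an existing Toon avatar):
#
# cannon = Cannon(render, Point3(0, 0, 0))
# cannon.load('partyCannon')
# cannon.show()
# cannon.updateModel(rotation=30, angle=45)
# cannon.placeToonInside(toon)
# pos = cannon.getToonFirePos()
# hpr = cannon.getToonFireHpr()
# vel = cannon.getToonFireVel()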
| {
"content_hash": "044b7cac6537d1498b3525149dbc9ded",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 219,
"avg_line_length": 36.048,
"alnum_prop": 0.6525743453173546,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "a6c7fd1dc13aadaed627325d087b4ccb807ab74b",
"size": "9012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/parties/Cannon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
} |
"""
find 3 numbers that add up to 1000
that are also a pythagorean triplet
iterate through a list of sum sets checking with is_py_trip helper function
return the product of a b & c
"""
def is_py_trip(a, b, c):
if a**2 + b**2 == c**2:
return True
else:
return False
def find_py_trip(ptsum):
for aval in range(1, (ptsum / 3)):
for bval in range(aval + 1, ptsum):
cval = ptsum - aval - bval
check = is_py_trip(aval, bval, cval)
#print aval, bval, cval, 'is', check
if check:
return aval * bval * cval
bval += 1
aval += 1
print find_py_trip(1000)
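# Worked check: the only such triplet summing to 1000 is (200, 375, 425),
# since 200**2 + 375**2 = 40000 + 140625 = 180625 = 425**2, so the script
# prints 200 * 375 * 425 = 31875000.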
| {
"content_hash": "2355011a82d6d46455b9e2b72ab9fd69",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 23.892857142857142,
"alnum_prop": 0.5530642750373692,
"repo_name": "chadhs/project-euler",
"id": "0c45b7e1c387430037ffed315aa55eb1f0583137",
"size": "692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prob0009.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Clojure",
"bytes": "530"
},
{
"name": "Python",
"bytes": "11467"
}
],
"symlink_target": ""
} |
from nussl import ml, datasets
from nussl.core.constants import ALL_WINDOWS
import nussl
import pytest
import torch
import itertools
from scipy.signal import check_COLA
import numpy as np
def test_filter_bank(one_item, monkeypatch):
pytest.raises(
NotImplementedError, ml.networks.modules.FilterBank, 2048)
def dummy_filters(self):
num_filters = (1 + self.filter_length // 2) * 2
random_basis = torch.randn(
self.filter_length, num_filters)
return random_basis.float()
def dummy_inverse(self):
num_filters = (1 + self.filter_length // 2) * 2
random_basis = torch.randn(
self.filter_length, num_filters)
return random_basis.float().T
monkeypatch.setattr(
ml.networks.modules.FilterBank,
'get_transform_filters',
dummy_filters
)
monkeypatch.setattr(
ml.networks.modules.FilterBank,
'get_inverse_filters',
dummy_inverse
)
representation = ml.networks.modules.FilterBank(
512, hop_length=128)
data = one_item['mix_audio']
encoded = representation(data, 'transform')
decoded = representation(encoded, 'inverse')
one_sided_shape = list(
encoded.squeeze(0).shape)
one_sided_shape[1] = one_sided_shape[1] // 2
assert tuple(one_sided_shape) == tuple(one_item['mix_magnitude'].shape[1:])
data = one_item['source_audio']
encoded = representation(data, 'transform')
decoded = representation(encoded, 'inverse')
assert decoded.shape == data.shape
def test_filter_bank_alignment(one_item):
# if we construct a signal with an impulse at a random
# offset, it should stay in the same place after the
# stft
win_length = 256
hop_length = 64
win_type = 'sqrt_hann'
representation = ml.networks.modules.STFT(
win_length, hop_length=hop_length, window_type=win_type)
data = torch.zeros_like(one_item['source_audio'])
for _ in range(20):
offset = np.random.randint(0, data.shape[-2])
data[..., offset, 0] = 1
encoded = representation(data, 'transform')
decoded = representation(encoded, 'inverse')
assert torch.allclose(decoded, data, atol=1e-6)
sr = nussl.constants.DEFAULT_SAMPLE_RATE
# Define my window lengths to be powers of 2, ranging from 128 to 2048 samples
win_min = 7 # 2 ** 7 = 128
win_max = 11 # 2 ** 11 = 2048
win_lengths = [2 ** i for i in range(win_min, win_max + 1)]
win_length_32ms = int(2 ** (np.ceil(np.log2(nussl.constants.DEFAULT_WIN_LEN_PARAM * sr))))
win_lengths.append(win_length_32ms)
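# e.g. at a 44100 Hz sample rate, 0.032 s * 44100 Hz ~= 1411 samples,
# which rounds up to the next power of two: 2048.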
hop_length_ratios = [0.5, .25]
window_types = ALL_WINDOWS
signals = []
combos = itertools.product(win_lengths, hop_length_ratios, window_types)
@pytest.mark.parametrize("combo", combos)
def test_stft_module(combo, one_item):
win_length = combo[0]
hop_length = int(combo[0] * combo[1])
win_type = combo[2]
window = nussl.AudioSignal.get_window(combo[2], win_length)
stft_params = nussl.STFTParams(
window_length=win_length, hop_length=hop_length, window_type=win_type
)
representation = ml.networks.modules.STFT(
win_length, hop_length=hop_length, window_type=win_type)
if not check_COLA(window, win_length, win_length - hop_length):
# COLA fails for this window/hop combination, so perfect reconstruction
# is not guaranteed; skip the reconstruction check for this combo.
return
data = one_item['mix_audio']
encoded = representation(data, 'transform')
decoded = representation(encoded, 'inverse')
encoded = encoded.squeeze(0).permute(1, 0, 2)
assert (decoded - data).abs().max() < 1e-5
audio_signal = nussl.AudioSignal(
audio_data_array=data.squeeze(0).numpy(), sample_rate=16000, stft_params=stft_params
)
nussl_magnitude = np.abs(audio_signal.stft())
_encoded = encoded.squeeze(0)
cutoff = _encoded.shape[0] // 2
_encoded = _encoded[:cutoff, ...]
assert (_encoded - nussl_magnitude).abs().max() < 1e-6
def test_learned_filterbank(one_item):
representation = ml.networks.modules.LearnedFilterBank(
512, hop_length=128, requires_grad=True)
data = one_item['mix_audio']
encoded = representation(data, 'transform')
decoded = representation(encoded, 'inverse')
data = one_item['source_audio']
encoded = representation(data, 'transform')
decoded = representation(encoded, 'inverse')
assert decoded.shape == data.shape
| {
"content_hash": "cce6a7d1b1841098f1990357814b04d1",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 92,
"avg_line_length": 30.51048951048951,
"alnum_prop": 0.6582626633050653,
"repo_name": "interactiveaudiolab/nussl",
"id": "88754b4f93df79a90b814cefaf148d32280d5d03",
"size": "4363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ml/test_filterbank.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "11692"
},
{
"name": "Python",
"bytes": "591205"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
} |
import temper
def main(stats):
th = temper.TemperHandler()
device = th.get_devices()[0]
c = device.get_temperature(format='celsius')
stats.gauge('room.temperature', c)
if __name__ == '__main__':
# main() requires a metrics client exposing gauge(); a statsd client fits
# the call used above (assumption -- substitute the real backend in use).
from statsd import StatsClient
main(StatsClient())
| {
"content_hash": "bf424e48bc5500d4a9495e0221c50a35",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 48,
"avg_line_length": 18.833333333333332,
"alnum_prop": 0.6106194690265486,
"repo_name": "gak/mystats",
"id": "c727619286cb9e1c0ccad224655687414da6f4ec",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "temper/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9405"
}
],
"symlink_target": ""
} |
"""Helper CGI for POST uploads.
Utility library contains the main logic behind simulating the blobstore
uploading mechanism.
Contents:
GenerateBlobKey: Function for generation unique blob-keys.
UploadCGIHandler: Main CGI handler class for post uploads.
"""
import base64
import cStringIO
import datetime
import random
import time
import hashlib
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api.blobstore import blobstore
try:
from email.mime import base
from email.mime import multipart
from email import generator
except ImportError:
from email import Generator as generator
from email import MIMEBase as base
from email import MIMEMultipart as multipart
STRIPPED_HEADERS = frozenset(('content-length',
'content-md5',
'content-type',
))
MAX_STRING_NAME_LENGTH = 500
class Error(Exception):
"""Base class for upload processing errors."""
class InvalidMIMETypeFormatError(Error):
"""MIME type was formatted incorrectly."""
class UploadEntityTooLargeError(Error):
"""Entity being uploaded exceeded the allowed size."""
class FilenameOrContentTypeTooLargeError(Error):
"""The filename or content type exceeded the allowed size."""
def __init__(self, invalid_field):
Error.__init__(self,
'The %s exceeds the maximum allowed length of %s.' % (
invalid_field, MAX_STRING_NAME_LENGTH))
class InvalidMetadataError(Error):
"""The filename or content type of the entity was not a valid UTF-8 string."""
def GenerateBlobKey(time_func=time.time, random_func=random.random):
"""Generate a unique BlobKey.
BlobKey is generated using the current time stamp combined with a random
number. The two values are subject to an md5 digest and base64 url-safe
encoded. The new key is checked against the possibility of existence within
the datastore and the random number is regenerated until there is no match.
Args:
time_func: Function used for generating the timestamp. Used for
dependency injection. Allows for predictable results during tests.
Must return a floating point UTC timestamp.
random_func: Function used for generating the random number. Used for
dependency injection. Allows for predictable results during tests.
Returns:
String version of BlobKey that is unique within the BlobInfo datastore.
None if there are too many name conflicts.
"""
timestamp = str(time_func())
tries = 0
while tries < 10:
number = str(random_func())
digester = hashlib.md5()
digester.update(timestamp)
digester.update(number)
blob_key = base64.urlsafe_b64encode(digester.digest())
datastore_key = datastore.Key.from_path(blobstore.BLOB_INFO_KIND,
blob_key,
namespace='')
try:
datastore.Get(datastore_key)
tries += 1
except datastore_errors.EntityNotFoundError:
return blob_key
return None
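# Injecting deterministic time/random functions makes the generated key
# reproducible, which is how tests can exercise this path (values arbitrary):
#
# blob_key = GenerateBlobKey(time_func=lambda: 1234.5,
#                            random_func=lambda: 0.42)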
def _SplitMIMEType(mime_type):
"""Split MIME-type in to main and sub type.
Args:
mime_type: full MIME type string.
Returns:
(main, sub):
main: Main part of mime type (application, image, text, etc).
sub: Subtype part of mime type (pdf, png, html, etc).
Raises:
InvalidMIMETypeFormatError: If form item has incorrectly formatted MIME
type.
"""
if mime_type:
mime_type_array = mime_type.split('/')
if len(mime_type_array) == 1:
raise InvalidMIMETypeFormatError('Missing MIME sub-type.')
elif len(mime_type_array) == 2:
main_type, sub_type = mime_type_array
if not(main_type and sub_type):
raise InvalidMIMETypeFormatError(
'Incorrectly formatted MIME type: %s' % mime_type)
return main_type, sub_type
else:
raise InvalidMIMETypeFormatError(
'Incorrectly formatted MIME type: %s' % mime_type)
else:
return 'application', 'octet-stream'
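# For example, _SplitMIMEType('image/png') returns ('image', 'png'), and
# _SplitMIMEType(None) falls back to ('application', 'octet-stream').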
class UploadCGIHandler(object):
"""Class used for handling an upload post.
The main interface to this class is the UploadCGI method. This will receive
the upload form, store the blobs contained in the post and rewrite the blobs
to contain BlobKeys instead of blobs.
"""
def __init__(self,
blob_storage,
generate_blob_key=GenerateBlobKey,
now_func=datetime.datetime.now):
"""Constructor.
Args:
blob_storage: BlobStorage instance where actual blobs are stored.
generate_blob_key: Function used for generating unique blob keys.
now_func: Function that returns the current timestamp.
"""
self.__blob_storage = blob_storage
self.__generate_blob_key = generate_blob_key
self.__now_func = now_func
def StoreBlob(self, form_item, creation):
"""Store form-item to blob storage.
Args:
form_item: FieldStorage instance that represents a specific form field.
This instance should have a non-empty filename attribute, meaning that
it is an uploaded blob rather than a normal form field.
creation: Timestamp to associate with new blobs creation time. This
parameter is provided so that all blobs in the same upload form can have
the same creation date.
Returns:
datastore.Entity('__BlobInfo__') associated with the upload.
"""
main_type, sub_type = _SplitMIMEType(form_item.type)
blob_key = self.__generate_blob_key()
blob_file = form_item.file
if 'Content-Transfer-Encoding' in form_item.headers:
if form_item.headers['Content-Transfer-Encoding'] == 'base64':
blob_file = cStringIO.StringIO(
base64.urlsafe_b64decode(blob_file.read()))
self.__blob_storage.StoreBlob(blob_key, blob_file)
content_type_formatter = base.MIMEBase(main_type, sub_type,
**form_item.type_options)
blob_entity = datastore.Entity('__BlobInfo__',
name=str(blob_key),
namespace='')
try:
blob_entity['content_type'] = (
content_type_formatter['content-type'].decode('utf-8'))
blob_entity['creation'] = creation
blob_entity['filename'] = form_item.filename.decode('utf-8')
except UnicodeDecodeError:
raise InvalidMetadataError(
'The uploaded entity contained invalid UTF-8 metadata. This may be '
'because the page containing the upload form was served with a '
'charset other than "utf-8".')
blob_file.seek(0)
digester = hashlib.md5()
while True:
block = blob_file.read(1 << 20)
if not block:
break
digester.update(block)
blob_entity['md5_hash'] = digester.hexdigest()
blob_entity['size'] = blob_file.tell()
blob_file.seek(0)
datastore.Put(blob_entity)
return blob_entity
def _GenerateMIMEMessage(self,
form,
boundary=None,
max_bytes_per_blob=None,
max_bytes_total=None,
bucket_name=None):
"""Generate a new post from original form.
Also responsible for storing blobs in the datastore.
Args:
form: Instance of cgi.FieldStorage representing the whole form
derived from original post data.
boundary: Boundary to use for resulting form. Used only in tests so
that the boundary is always consistent.
max_bytes_per_blob: The maximum size in bytes that any single blob
in the form is allowed to be.
max_bytes_total: The maximum size in bytes that the total of all blobs
in the form is allowed to be.
bucket_name: The name of the Google Storage bucket to upload the file to.
Returns:
A MIMEMultipart instance representing the new HTTP post which should be
forwarded to the developers actual CGI handler. DO NOT use the return
value of this method to generate a string unless you know what you're
doing and properly handle folding whitespace (from rfc822) properly.
Raises:
UploadEntityTooLargeError: The upload exceeds either the
max_bytes_per_blob or max_bytes_total limits.
FilenameOrContentTypeTooLargeError: The filename or the content_type of
the upload is larger than the allowed size for a string type in the
datastore.
"""
message = multipart.MIMEMultipart('form-data', boundary)
for name, value in form.headers.items():
if name.lower() not in STRIPPED_HEADERS:
message.add_header(name, value)
def IterateForm():
"""Flattens form in to single sequence of cgi.FieldStorage instances.
The resulting cgi.FieldStorage objects are a little bit irregular in
their structure. A single name can have multiple sub-items. In this
case, the root FieldStorage object has a list associated with that field
name. Otherwise, the root FieldStorage object just refers to a single
nested instance.
Lists of FieldStorage instances occur when a form has multiple values
for the same name.
Yields:
cgi.FieldStorage irrespective of their nesting level.
"""
for key in sorted(form):
form_item = form[key]
if isinstance(form_item, list):
for list_item in form_item:
yield list_item
else:
yield form_item
creation = self.__now_func()
total_bytes_uploaded = 0
created_blobs = []
upload_too_large = False
filename_too_large = False
content_type_too_large = False
for form_item in IterateForm():
disposition_parameters = {'name': form_item.name}
if form_item.filename is None:
variable = base.MIMEBase('text', 'plain')
variable.set_payload(form_item.value)
else:
if not form_item.filename:
continue
disposition_parameters['filename'] = form_item.filename
main_type, sub_type = _SplitMIMEType(form_item.type)
form_item.file.seek(0, 2)
content_length = form_item.file.tell()
form_item.file.seek(0)
total_bytes_uploaded += content_length
if max_bytes_per_blob is not None:
if max_bytes_per_blob < content_length:
upload_too_large = True
break
if max_bytes_total is not None:
if max_bytes_total < total_bytes_uploaded:
upload_too_large = True
break
if form_item.filename is not None:
if MAX_STRING_NAME_LENGTH < len(form_item.filename):
filename_too_large = True
break
if form_item.type is not None:
if MAX_STRING_NAME_LENGTH < len(form_item.type):
content_type_too_large = True
break
blob_entity = self.StoreBlob(form_item, creation)
created_blobs.append(blob_entity)
variable = base.MIMEBase('message',
'external-body',
access_type=blobstore.BLOB_KEY_HEADER,
blob_key=blob_entity.key().name())
form_item.file.seek(0)
digester = hashlib.md5()
while True:
block = form_item.file.read(1 << 20)
if not block:
break
digester.update(block)
blob_key = base64.urlsafe_b64encode(digester.hexdigest())
form_item.file.seek(0)
external = base.MIMEBase(main_type,
sub_type,
**form_item.type_options)
headers = dict(form_item.headers)
headers['Content-Length'] = str(content_length)
headers[blobstore.UPLOAD_INFO_CREATION_HEADER] = (
blobstore._format_creation(creation))
if bucket_name:
headers[blobstore.CLOUD_STORAGE_OBJECT_HEADER] = (
'/gs/%s/fake-%s-%s' % (bucket_name, blob_entity.key().name(),
blob_key))
headers['Content-MD5'] = blob_key
for key, value in headers.iteritems():
external.add_header(key, value)
external_disposition_parameters = dict(disposition_parameters)
external_disposition_parameters['filename'] = form_item.filename
if not external.get('Content-Disposition'):
external.add_header('Content-Disposition',
'form-data',
**external_disposition_parameters)
variable.set_payload([external])
variable.add_header('Content-Disposition',
'form-data',
**disposition_parameters)
message.attach(variable)
if upload_too_large or filename_too_large or content_type_too_large:
for blob in created_blobs:
datastore.Delete(blob)
if upload_too_large:
raise UploadEntityTooLargeError()
elif filename_too_large:
raise FilenameOrContentTypeTooLargeError('filename')
else:
raise FilenameOrContentTypeTooLargeError('content-type')
return message
def GenerateMIMEMessageString(self,
form,
boundary=None,
max_bytes_per_blob=None,
max_bytes_total=None,
bucket_name=None):
"""Generate a new post string from original form.
Args:
form: Instance of cgi.FieldStorage representing the whole form
derived from original post data.
boundary: Boundary to use for resulting form. Used only in tests so
that the boundary is always consistent.
max_bytes_per_blob: The maximum size in bytes that any single blob
in the form is allowed to be.
max_bytes_total: The maximum size in bytes that the total of all blobs
in the form is allowed to be.
bucket_name: The name of the Google Storage bucket to upload the file to.
Returns:
A string rendering of a MIMEMultipart instance.
"""
message = self._GenerateMIMEMessage(form,
boundary=boundary,
max_bytes_per_blob=max_bytes_per_blob,
max_bytes_total=max_bytes_total,
bucket_name=bucket_name)
message_out = cStringIO.StringIO()
gen = generator.Generator(message_out, maxheaderlen=0)
gen.flatten(message, unixfrom=False)
return message_out.getvalue()
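# A minimal usage sketch (blob_storage and form are assumed to come from
# the surrounding dev appserver environment):
#
# handler = UploadCGIHandler(blob_storage)
# new_post_body = handler.GenerateMIMEMessageString(
#     form, max_bytes_per_blob=1 << 20)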
| {
"content_hash": "a6b729497db51bb22a45c19f296566f5",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 80,
"avg_line_length": 33.46924829157175,
"alnum_prop": 0.6294834274824747,
"repo_name": "dcroc16/skunk_works",
"id": "db53c5db017fcf979dfa2a04e40fef58246c00bd",
"size": "15294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google_appengine/google/appengine/tools/dev_appserver_upload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "407860"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "CSS",
"bytes": "251658"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "JavaScript",
"bytes": "784750"
},
{
"name": "PHP",
"bytes": "2381119"
},
{
"name": "Python",
"bytes": "51887444"
},
{
"name": "Shell",
"bytes": "32889"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0003_auto_20150311_2105'),
]
operations = [
migrations.AddField(
model_name='onlineuser',
name='online_mail',
field=models.CharField(max_length=50, null=True, verbose_name='Online-epost', blank=True),
preserve_default=True,
),
]
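# Apply with the standard management command (assuming the usual manage.py
# entry point):
#   python manage.py migrate authentication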
| {
"content_hash": "1630d6af9cc5ad8673738bfd9a9b4118",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 102,
"avg_line_length": 25.764705882352942,
"alnum_prop": 0.6004566210045662,
"repo_name": "dotKom/onlineweb4",
"id": "39726a25aa188821436129da951ed899b3998f4c",
"size": "464",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/authentication/migrations/0004_onlineuser_online_mail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "71414"
},
{
"name": "HTML",
"bytes": "463894"
},
{
"name": "JavaScript",
"bytes": "745404"
},
{
"name": "Python",
"bytes": "925584"
},
{
"name": "Shell",
"bytes": "3130"
},
{
"name": "Standard ML",
"bytes": "1088"
}
],
"symlink_target": ""
} |
"""
This module considers dictionaries as discrete functional mappings, and
contains high-level interfaces for dealing with these mappings.
"""
from itertools import chain
from six import iteritems
def chain_mapping(*dictionaries):
"""
Say you have more than one dictionary, and the range of the first is
the domain of the second. You can create a new dictionary with the
domain of the first but range of the second using this method.
>>> chain_mapping({1: 'dog'}, {'dog': 3.0})
{1: 3.0}
"""
base_dict = dictionaries[0].copy()
for dictionary in dictionaries[1:]:
map_dict(dictionary.__getitem__, base_dict, in_place=True)
return base_dict
def invert_mapping(dictionary):
"""
Inverts a dictionary with a one-to-many mapping from key to value to a
new dictionary with a one-to-many mapping from value to key.
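    For example:
    >>> invert_mapping({1: ['a', 'b']}) == {'a': [1], 'b': [1]}
    True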
"""
inverted_dict = {}
for key, values in iteritems(dictionary):
for value in values:
inverted_dict.setdefault(value, []).append(key)
return inverted_dict
def is_injective(dictionary):
"""
Returns True if the mapping is one-to-one, False otherwise.
Mapping keys are naturally unique, so this method just verifies that the
values are also unique. This requires that the values are all hashable.
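    For example:
    >>> is_injective({1: 'a', 2: 'b'})
    True
    >>> is_injective({1: 'a', 2: 'a'})
    False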
"""
covered = set()
for key, value in iteritems(dictionary):
if value in covered:
return False
covered.add(value)
    return True
def invert_injective_mapping(dictionary):
"""
Inverts a dictionary with a one-to-one mapping from key to value, into a
new dictionary with a one-to-one mapping from value to key.
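    For example:
    >>> invert_injective_mapping({1: 'a', 2: 'b'}) == {'a': 1, 'b': 2}
    True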
"""
inverted_dict = {}
for key, value in iteritems(dictionary):
assert value not in inverted_dict, "Mapping is not 1-1"
inverted_dict[value] = key
return inverted_dict
def map_dict(method, dictionary, in_place=False):
"""
Applies the method to every value in the dictionary, ignoring keys. If the
in_place keyword is True, the existing dictionary is modified and returned.
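    For example:
    >>> map_dict(len, {'key': 'abc'})
    {'key': 3}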
:param method:
The method to apply.
:param dictionary:
The dictionary whose values to apply the method to.
:return:
A dictionary with the updated values.
"""
if in_place:
# Modify the dictionary in-place.
for key, value in iteritems(dictionary):
dictionary[key] = method(value)
return dictionary
else:
        # Build and return a new dictionary; the original is left untouched.
return dict((k, method(v)) for (k, v) in iteritems(dictionary))
def multi_dict(input_pairs):
"""
Similar to casting pairs to a dictionary, except that repeated pairs
are allowed. To show the difference:
>>> dict( [('a', 1), ('b', 2), ('a', 3)] )
{'a': 3, 'b': 2}
>>> multi_dict( [('a', 1), ('b', 2), ('a', 3)] )
{'a': [1, 3], 'b': [2]}
:param input_pairs:
A list of (key, value) pairs.
:return:
A dictionary mapping keys to lists of values.
"""
output_dict = {}
for key, value in input_pairs:
existing_values = output_dict.get(key, [])
existing_values.append(value)
output_dict[key] = existing_values
return output_dict
def procmap(method, item_list):
"""
Like :py:func:`map`, but where the method being applied has no return value. In
other words, the procedure is called on every item in the list
sequentially, but since each call has no return value, the call to
:func:`procmap` also has no return value.
:param method:
The procedure to call each time.
:param item_list:
The list of items to apply the procedure to.
:return:
:py:const:`None`
"""
for item in item_list:
method(item)
def merge_dicts(*args):
"""
Merges a number of dictionaries together into one. Assumes the dictionary
maps to a set of hashable items. The result for each key is the union of
all the values in the provided dictionaries.
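    For example:
    >>> merge_dicts({1: {'a'}}, {1: {'b'}}) == {1: {'a', 'b'}}
    True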
"""
unified_dict = {}
for key, items in chain(*[iteritems(d) for d in args]):
if key in unified_dict:
unified_dict[key].update(items)
else:
unified_dict[key] = set(items)
return unified_dict
def partial_map(method, object_seq):
"""
    Like map, but filters out all objects with a non-true result, and
returns them in a separate items list. I.e. any item for which the map
resulted in a non-true value is provided in a "reject" list.
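    For example, doubling rejects the item whose result is zero:
    >>> partial_map(lambda x: x * 2, [0, 1, 2])
    ([2, 4], [0])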
:param func method:
The method to perform on each item.
:param sequence object_seq:
The sequence of items to apply the method to.
:return:
The tuple of lists (mapped_items, rejected_items).
:rtype: :py:class:`tuple`
"""
mapped = []
rejected = []
for item in object_seq:
        result = method(item)
if result:
mapped.append(result)
else:
rejected.append(item)
return mapped, rejected
def filtered_map(method, object_list):
"""
Performs a map then a filter on the object list. The final filter strips
out objects which evaluate to False.
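    For example:
    >>> filtered_map(lambda x: x - 1, [1, 2, 3])
    [1, 2]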
"""
return [x for x in map(method, object_list) if x]
| {
"content_hash": "e08c8c9d6a3541e93468da4efb1586db",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 83,
"avg_line_length": 27.952879581151834,
"alnum_prop": 0.6283948304926016,
"repo_name": "larsyencken/cjktools",
"id": "5c8adcd129896bdd4e4749a6c457d3cdb02ded2f",
"size": "5391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cjktools/maps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "163950"
},
{
"name": "Shell",
"bytes": "313"
}
],
"symlink_target": ""
} |
"""
Emulating the functionality of the Adafruit_DHT driver.
This is required because the DHT22 driver utilizes native C code
that will not run on hardware that is not an RPi. This stub module allows
testing code that relies on the Adafruit_DHT driver without running on
an RPi.
"""
from random import uniform
DHT22 = 22
SENSORS = [DHT22]
def read_retry(sensor, pin):
"""Emulate the functionality of the Adafruit_DHT read_retry() method, without hardware"""
def gen_rand():
return uniform(50.0000, 100.0000)
return (gen_rand(), gen_rand())
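# A minimal demo of the stub (the pin value is arbitrary and ignored here,
# mirroring the real Adafruit_DHT.read_retry(sensor, pin) signature):
if __name__ == '__main__':
    humidity, temperature = read_retry(DHT22, 4)
    print('humidity=%.1f%% temperature=%.1f' % (humidity, temperature))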
| {
"content_hash": "efd5ae5e4dce492e89be4a73f6e68214",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 93,
"avg_line_length": 29.63157894736842,
"alnum_prop": 0.7282415630550622,
"repo_name": "arunderwood/BasilPi",
"id": "03b33a97f8876a47cc7752e94ba010eb65bcf6e3",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/Adafruit_DHT_stub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "296"
},
{
"name": "Python",
"bytes": "15846"
}
],
"symlink_target": ""
} |
from picamera import PiCamera
from time import sleep
camera = PiCamera()
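# alpha (0-255) sets the preview overlay's opacity; 200 leaves it slightly transparent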
camera.start_preview(alpha=200)
sleep(10)
camera.stop_preview()
| {
"content_hash": "873775ebf8defec6db8801afa2e87c7e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 31,
"avg_line_length": 17.25,
"alnum_prop": 0.7898550724637681,
"repo_name": "leea666/picamera",
"id": "6b41386e70b2ecaadac4f524ace1c685c3fc45ad",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cameraTests/camera.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "235"
}
],
"symlink_target": ""
} |
from django.contrib import admin
admin.site.index_title = 'Categories'
admin.site.site_title = 'Citizenmatch Administration'
admin.site.site_header = 'Citizenmatch Administration'
| {
"content_hash": "86e35a64066917bdf836ab67a6455bd7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 54,
"avg_line_length": 30.857142857142858,
"alnum_prop": 0.8101851851851852,
"repo_name": "kajala/citizenmatch-backend",
"id": "0c119fc5ee3ab379b0afeb99473ae1425fecc59d",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backoffice/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46752"
}
],
"symlink_target": ""
} |
import sys
from pyparsing import *
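# Translates DCPU-16 assembly (read from stdin) into LLVM IR (written to
# stdout). Each opcode class below renders itself as IR against a VMState
# struct holding the register file and 64K words of memory.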
class Opcode(object):
def __init__(self, opcode):
self._opcode = opcode
def __repr__(self):
return 'Opcode(' + self._opcode + ')'
def to_das(self):
return self._opcode
class SETOpcode(Opcode):
def __init__(self):
super(SETOpcode, self).__init__('SET')
def __repr__(self):
return 'SETOpcode()'
def to_llvm(self, out, arguments):
arguments[0].to_llvm_store(out, arguments[1].to_llvm(out))
class OUTOpcode(Opcode):
def __init__(self):
super(OUTOpcode, self).__init__('OUT')
def __repr__(self):
return 'OUTOpcode()'
def to_llvm(self, out, arguments):
out.write_line('call void @output(i16 %s)' % (arguments[0].to_llvm(out)))
class DBGOpcode(Opcode):
def __init__(self):
super(DBGOpcode, self).__init__('DBG')
def __repr__(self):
return 'DBGOpcode()'
def to_llvm(self, out, arguments):
out.dump_regs(True)
out.write_line('call void @debug(%struct.VMState* %state)')
class ADDOpcode(Opcode):
def __init__(self):
super(ADDOpcode, self).__init__('ADD')
def __repr__(self):
return 'ADDOpcode()'
def to_llvm(self, out, arguments):
arg0 = arguments[0].to_llvm(out)
arg1 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
tmp2 = out.temp_variable()
tmp3 = out.temp_variable()
tmp4 = out.temp_variable()
out.write_line('%s = zext i16 %s to i32' % (tmp1, arg0))
out.write_line('%s = zext i16 %s to i32' % (tmp2, arg1))
out.write_line('%s = add i32 %s, %s' % (tmp3, tmp1, tmp2))
out.write_line('%s = trunc i32 %s to i16' % (tmp4, tmp3))
arguments[0].to_llvm_store(out, tmp4)
# overflow
tmp5 = out.temp_variable()
tmp6 = out.temp_variable()
out.write_line('%s = lshr i32 %s, 16' % (tmp5, tmp3))
out.write_line('%s = trunc i32 %s to i16' % (tmp6, tmp5))
out.set_reg('O', tmp6)
class MULOpcode(Opcode):
def __init__(self):
super(MULOpcode, self).__init__('MUL')
def __repr__(self):
return 'MULOpcode()'
def to_llvm(self, out, arguments):
arg0 = arguments[0].to_llvm(out)
arg1 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
tmp2 = out.temp_variable()
tmp3 = out.temp_variable()
tmp4 = out.temp_variable()
out.write_line('%s = zext i16 %s to i32' % (tmp1, arg0))
out.write_line('%s = zext i16 %s to i32' % (tmp2, arg1))
out.write_line('%s = mul i32 %s, %s' % (tmp3, tmp1, tmp2))
out.write_line('%s = trunc i32 %s to i16' % (tmp4, tmp3))
arguments[0].to_llvm_store(out, tmp4)
# overflow
tmp5 = out.temp_variable()
tmp6 = out.temp_variable()
out.write_line('%s = lshr i32 %s, 16' % (tmp5, tmp3))
out.write_line('%s = trunc i32 %s to i16' % (tmp6, tmp5))
out.set_reg('O', tmp6)
class DIVOpcode(Opcode):
def __init__(self):
super(DIVOpcode, self).__init__('DIV')
def __repr__(self):
return 'DIVOpcode()'
def to_llvm(self, out, arguments):
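        # Per the DCPU-16 spec, DIV computes (a << 16) / b at 32-bit width:
        # the high half is the integer quotient stored to the destination and
        # the low half (the fractional bits) lands in O. b == 0 yields 0 in both.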
arg0 = arguments[0].to_llvm(out)
arg1 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
label1 = out.label()
label2 = out.label()
label3 = out.label()
out.write_line('br label %%%s' % label1)
out.write_line('%s:' % label1)
out.write_line('%s = icmp eq i16 %s, 0' % (tmp1, arg1))
out.write_line('br i1 %s, label %%%s, label %%%s' % (tmp1, label3, label2))
# regular divide
tmp2 = out.temp_variable()
tmp3 = out.temp_variable()
tmp4 = out.temp_variable()
tmp5 = out.temp_variable()
tmp6 = out.temp_variable()
tmp7 = out.temp_variable()
tmp8 = out.temp_variable()
out.write_line('%s:' % label2)
out.write_line('%s = zext i16 %s to i32' % (tmp2, arg0))
out.write_line('%s = zext i16 %s to i32' % (tmp3, arg1))
out.write_line('%s = shl i32 %s, 16' % (tmp4, tmp2))
out.write_line('%s = udiv i32 %s, %s' % (tmp5, tmp4, tmp3))
out.write_line('%s = lshr i32 %s, 16' % (tmp6, tmp5))
out.write_line('%s = trunc i32 %s to i16' % (tmp7, tmp6))
out.write_line('%s = trunc i32 %s to i16' % (tmp8, tmp5))
out.write_line('br label %%%s' % label3)
# done
tmp9 = out.temp_variable()
tmp10 = out.temp_variable()
out.write_line('%s:' % label3)
out.write_line('%s = phi i16 [0, %%%s], [%s, %%%s]' % (tmp9, label1, tmp7, label2))
out.write_line('%s = phi i16 [0, %%%s], [%s, %%%s]' % (tmp10, label1, tmp8, label2))
arguments[0].to_llvm_store(out, tmp9)
out.set_reg('O', tmp10)
class MODOpcode(Opcode):
def __init__(self):
super(MODOpcode, self).__init__('MOD')
def __repr__(self):
return 'MODOpcode()'
def to_llvm(self, out, arguments):
arg0 = arguments[0].to_llvm(out)
arg1 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
label1 = out.label()
label2 = out.label()
label3 = out.label()
out.write_line('br label %%%s' % label1)
out.write_line('%s:' % label1)
out.write_line('%s = icmp eq i16 %s, 0' % (tmp1, arg1))
out.write_line('br i1 %s, label %%%s, label %%%s' % (tmp1, label3, label2))
        # regular modulo
tmp2 = out.temp_variable()
out.write_line('%s:' % label2)
out.write_line('%s = urem i16 %s, %s' % (tmp2, arg0, arg1))
out.write_line('br label %%%s' % label3)
# done
tmp3 = out.temp_variable()
out.write_line('%s:' % label3)
out.write_line('%s = phi i16 [0, %%%s], [%s, %%%s]' % (tmp3, label1, tmp2, label2))
arguments[0].to_llvm_store(out, tmp3)
out.set_reg('O', 0)
class SUBOpcode(Opcode):
def __init__(self):
super(SUBOpcode, self).__init__('SUB')
def __repr__(self):
return 'SUBOpcode()'
def to_llvm(self, out, arguments):
arg0 = arguments[0].to_llvm(out)
arg1 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
tmp2 = out.temp_variable()
tmp3 = out.temp_variable()
tmp4 = out.temp_variable()
out.write_line('%s = zext i16 %s to i32' % (tmp1, arg0))
out.write_line('%s = zext i16 %s to i32' % (tmp2, arg1))
out.write_line('%s = sub i32 %s, %s' % (tmp3, tmp1, tmp2))
out.write_line('%s = trunc i32 %s to i16' % (tmp4, tmp3))
arguments[0].to_llvm_store(out, tmp4)
# underflow
tmp5 = out.temp_variable()
tmp6 = out.temp_variable()
out.write_line('%s = lshr i32 %s, 16' % (tmp5, tmp3))
out.write_line('%s = trunc i32 %s to i16' % (tmp6, tmp5))
out.set_reg('O', tmp6)
class JSROpcode(Opcode):
def __init__(self):
super(JSROpcode, self).__init__('JSR')
def __repr__(self):
return 'JSROpcode()'
def to_llvm(self, out, arguments):
out.dump_regs()
out.write_line('call void @%s(%%struct.VMState* %%state)' % arguments[0].label())
out.reset_regs()
class IFEOpcode(Opcode):
def __init__(self):
super(IFEOpcode, self).__init__('IFE')
def __repr__(self):
return 'IFEOpcode()'
def to_llvm(self, out, arguments):
arg1 = arguments[0].to_llvm(out)
arg2 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
out.write_line('%s = icmp eq i16 %s, %s' % (tmp1, arg1, arg2))
label1 = out.label()
label2 = out.label()
out.write_line('br i1 %s, label %%%s, label %%%s' % (tmp1, label1, label2))
out.write_line('%s:' % label1)
def post_condition():
out.write_line('br label %%%s' % label2)
out.write_line('%s:' % label2)
return post_condition
class IFNOpcode(Opcode):
def __init__(self):
super(IFNOpcode, self).__init__('IFN')
def __repr__(self):
return 'IFNOpcode()'
def to_llvm(self, out, arguments):
arg1 = arguments[0].to_llvm(out)
arg2 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
out.write_line('%s = icmp ne i16 %s, %s' % (tmp1, arg1, arg2))
label1 = out.label()
label2 = out.label()
out.write_line('br i1 %s, label %%%s, label %%%s' % (tmp1, label1, label2))
out.write_line('%s:' % label1)
def post_condition():
out.write_line('br label %%%s' % label2)
out.write_line('%s:' % label2)
return post_condition
class IFGOpcode(Opcode):
def __init__(self):
super(IFGOpcode, self).__init__('IFG')
def __repr__(self):
return 'IFGOpcode()'
def to_llvm(self, out, arguments):
arg1 = arguments[0].to_llvm(out)
arg2 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
out.write_line('%s = icmp ugt i16 %s, %s' % (tmp1, arg1, arg2))
label1 = out.label()
label2 = out.label()
out.write_line('br i1 %s, label %%%s, label %%%s' % (tmp1, label1, label2))
out.write_line('%s:' % label1)
def post_condition():
out.write_line('br label %%%s' % label2)
out.write_line('%s:' % label2)
return post_condition
class IFBOpcode(Opcode):
def __init__(self):
super(IFBOpcode, self).__init__('IFB')
def __repr__(self):
return 'IFBOpcode()'
def to_llvm(self, out, arguments):
arg1 = arguments[0].to_llvm(out)
arg2 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
tmp2 = out.temp_variable()
out.write_line('%s = and i16 %s, %s' % (tmp1, arg1, arg2))
out.write_line('%s = icmp ne i16 %s, 0' % (tmp2, tmp1))
label1 = out.label()
label2 = out.label()
out.write_line('br i1 %s, label %%%s, label %%%s' % (tmp2, label1, label2))
out.write_line('%s:' % label1)
def post_condition():
out.write_line('br label %%%s' % label2)
out.write_line('%s:' % label2)
return post_condition
class SHLOpcode(Opcode):
def __init__(self):
super(SHLOpcode, self).__init__('SHL')
def __repr__(self):
return 'SHLOpcode()'
def to_llvm(self, out, arguments):
arg0 = arguments[0].to_llvm(out)
arg1 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
tmp2 = out.temp_variable()
tmp3 = out.temp_variable()
tmp4 = out.temp_variable()
out.write_line('%s = zext i16 %s to i32' % (tmp1, arg0))
out.write_line('%s = zext i16 %s to i32' % (tmp2, arg1))
out.write_line('%s = shl i32 %s, %s' % (tmp3, tmp1, tmp2))
out.write_line('%s = trunc i32 %s to i16' % (tmp4, tmp3))
arguments[0].to_llvm_store(out, tmp4)
# overflow
tmp5 = out.temp_variable()
tmp6 = out.temp_variable()
out.write_line('%s = lshr i32 %s, 16' % (tmp5, tmp3))
out.write_line('%s = trunc i32 %s to i16' % (tmp6, tmp5))
out.set_reg('O', tmp6)
class SHROpcode(Opcode):
def __init__(self):
super(SHROpcode, self).__init__('SHR')
def __repr__(self):
return 'SHROpcode()'
def to_llvm(self, out, arguments):
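        # Per the DCPU-16 spec, SHR widens to 32 bits so that O can capture
        # ((a << 16) >> b) & 0xffff -- the bits shifted out of the result.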
arg0 = arguments[0].to_llvm(out)
arg1 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
tmp2 = out.temp_variable()
tmp3 = out.temp_variable()
tmp4 = out.temp_variable()
tmp5 = out.temp_variable()
tmp6 = out.temp_variable()
out.write_line('%s = zext i16 %s to i32' % (tmp1, arg0))
out.write_line('%s = zext i16 %s to i32' % (tmp2, arg1))
out.write_line('%s = shl i32 %s, 16' % (tmp3, tmp1))
out.write_line('%s = lshr i32 %s, %s' % (tmp4, tmp3, tmp2))
out.write_line('%s = lshr i32 %s, 16' % (tmp5, tmp4))
out.write_line('%s = trunc i32 %s to i16' % (tmp6, tmp5))
arguments[0].to_llvm_store(out, tmp6)
# overflow
tmp7 = out.temp_variable()
out.write_line('%s = trunc i32 %s to i16' % (tmp7, tmp4))
out.set_reg('O', tmp7)
class ANDOpcode(Opcode):
def __init__(self):
super(ANDOpcode, self).__init__('AND')
def __repr__(self):
return 'ANDOpcode()'
def to_llvm(self, out, arguments):
arg0 = arguments[0].to_llvm(out)
arg1 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
out.write_line('%s = and i16 %s, %s' % (tmp1, arg0, arg1))
arguments[0].to_llvm_store(out, tmp1)
class OROpcode(Opcode):
def __init__(self):
super(OROpcode, self).__init__('OR')
def __repr__(self):
return 'OROpcode()'
def to_llvm(self, out, arguments):
arg0 = arguments[0].to_llvm(out)
arg1 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
out.write_line('%s = or i16 %s, %s' % (tmp1, arg0, arg1))
arguments[0].to_llvm_store(out, tmp1)
class XOROpcode(Opcode):
def __init__(self):
super(XOROpcode, self).__init__('XOR')
def __repr__(self):
return 'XOROpcode()'
def to_llvm(self, out, arguments):
arg0 = arguments[0].to_llvm(out)
arg1 = arguments[1].to_llvm(out)
tmp1 = out.temp_variable()
out.write_line('%s = xor i16 %s, %s' % (tmp1, arg0, arg1))
arguments[0].to_llvm_store(out, tmp1)
class Register(object):
def __init__(self, register, offset):
self._register = register
self._offset = offset
def __repr__(self):
return 'Register(' + repr(self._register) + ', ' + repr(self._offset) + ')'
def register(self):
return self._register
def offset(self):
return self._offset
def extra_length(self):
return 0
def to_das(self):
return self._register
def to_llvm(self, out):
return out.reg(self._register)
def to_llvm_store(self, out, value):
out.set_reg(self._register, value)
def dump_reg(self, out):
out.dump_reg(self._register)
class Number(object):
def __init__(self, num):
self._num = num
def __repr__(self):
return 'Number(' + repr(self._num) + ')'
def extra_length(self):
return 0
def to_das(self):
return str(self._num)
def to_llvm(self, out):
return str(self._num)
class Label(object):
def __init__(self, label):
self._label = label
def __repr__(self):
return 'Label(' + repr(self._label) + ')'
def extra_length(self):
return 0
def to_das(self):
return self._label
def label(self):
return self._label
class Addition(object):
def __init__(self, number, register):
self._number = number
self._register = register
def __repr__(self):
return 'Addition(' + repr(self._number) + ', ' + repr(self._register) + ')'
def extra_length(self):
return 1
def to_das(self):
return self._number.to_das() + '+' + self._register.to_das()
def to_llvm(self, out):
arg1 = self._number.to_llvm(out)
arg2 = self._register.to_llvm(out)
tmp1 = out.temp_variable()
out.write_line('%s = add i16 %s, %s' % (tmp1, arg1, arg2))
return tmp1
class Dereference(object):
def __init__(self, argument):
self._argument = argument
def __repr__(self):
return 'Dereference(' + repr(self._argument) + ')'
def extra_length(self):
return self._argument.extra_length()
def to_das(self):
return '[' + self._argument.to_das() + ']'
def to_llvm(self, out):
arg0 = self._argument.to_llvm(out)
tmp1 = out.temp_variable()
tmp2 = out.temp_variable()
out.write_line('%s = getelementptr i16* %%memory, i16 %s' % (tmp1, arg0))
out.write_line('%s = load i16* %s' % (tmp2, tmp1))
out.write_line('call void @memory_referenced(%%struct.VMState* %%state, i16 %s)' % arg0)
return tmp2
def to_llvm_store(self, out, value):
arg0 = self._argument.to_llvm(out)
tmp = out.temp_variable()
out.write_line('%s = getelementptr i16* %%memory, i16 %s' % (tmp, arg0))
out.write_line('store i16 %s, i16* %s' % (value, tmp))
out.write_line('call void @memory_referenced(%%struct.VMState* %%state, i16 %s)' % arg0)
def dump_reg(self, out):
pass
class Instruction(object):
def __init__(self, label, opcode, arguments):
self._label = label
self._opcode = opcode
self._arguments = arguments
self._pc = 0
def __repr__(self):
args = []
if (self._label):
args += ['label=' + repr(self._label)]
args += ['opcode=' + repr(self._opcode)]
args += ['arguments=[' + ', '.join([repr(x) for x in self._arguments]) + ']']
return 'Instruction(' + ', '.join(args) + ')'
def opcode(self):
return self._opcode
def arguments(self):
return self._arguments
def label(self):
return self._label
def pc(self):
return self._pc
def set_pc(self, pc):
self._pc = pc
def length(self):
if self.is_vm_instruction():
return 0
return 1 + sum([x.extra_length() for x in self._arguments])
def jump_label(self):
if self._is_set_PC() and not self.is_return():
return self._arguments[1].label()
return None
def is_return(self):
        return self._is_set_PC() and self._arguments[1] is Pop
def _is_set_PC(self):
return isinstance(self._opcode, SETOpcode) and isinstance(self._arguments[0], Register) and \
self._arguments[0].register() == 'PC'
def is_vm_instruction(self):
return isinstance(self._opcode, DBGOpcode) or isinstance(self._opcode, OUTOpcode)
def to_llvm(self, out):
out.write_line('')
out.write_line('; %s' % self.to_das())
if self._label is not None:
out.write_line('br label %%%s' % self._label)
out.write_line('%s:' % self._label)
out.set_reg('PC', self._pc)
if self.jump_label() is not None:
out.dump_regs()
out.write_line('br label %%%s' % self.jump_label())
return True, self.jump_label(), None
elif self.is_return():
out.dump_regs()
out.write_line('ret void')
return True, None, None
else:
return False, None, self._opcode.to_llvm(out, self._arguments)
def to_das(self):
return self._opcode.to_das() + ' ' + ', '.join([x.to_das() for x in self._arguments])
class Pop(object):
def __repr__(self):
return 'Pop()'
def to_das(self):
return 'POP'
def extra_length(self):
return 0
Pop = Pop()
class Peek(object):
def __repr__(self):
return 'Peek()'
def to_das(self):
return 'PEEK'
def extra_length(self):
return 0
Peek = Peek()
class Push(object):
def __repr__(self):
return 'Push()'
def to_das(self):
return 'PUSH'
def extra_length(self):
return 0
Push = Push()
class Program(object):
def __init__(self, instructions):
self._instructions = instructions
self._link()
def __repr__(self):
return 'Program([\n' + ',\n'.join([repr(x) for x in self._instructions]) + '])'
def _make_label_map(self):
return dict([(x[1].label(), x[0]) for x in enumerate(self._instructions) if x[1].label() is not None])
def _identify_function_labels(self):
return set([x.arguments()[0].label() for x in self._instructions if isinstance(x.opcode(), JSROpcode)])
def _link(self):
pc = 0
for instruction in self._instructions:
instruction.set_pc(pc)
pc += instruction.length()
self._label_map = self._make_label_map()
self._function_starts = self._identify_function_labels()
def _to_llvm_block(self, index, out):
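        # Emits straight-line IR starting at `index` until the next labelled
        # instruction (the start of another block) or an unconditional
        # transfer, closing any pending IF* skip labels before stopping.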
block_out = LLVM_Block_Out(out)
block_out.reset_regs()
referenced_labels = set()
post_conditions = []
first = True
for instruction in self._instructions[index:]:
if not first and instruction.label() is not None:
referenced_labels.add(instruction.label())
break
stop, label, post_condition = instruction.to_llvm(block_out)
done = stop and len(post_conditions) == 0
if post_condition is None:
for post_condition in post_conditions:
post_condition()
post_conditions = []
else:
post_conditions = [post_condition] + post_conditions
if label is not None:
referenced_labels.add(label)
if done:
break
first = False
block_out.dump_regs()
return referenced_labels
def _to_llvm_function(self, name, index, out):
rendered_labels = set()
pending_labels = set([index])
start_label = self._instructions[index].label()
if start_label is not None:
rendered_labels.add(start_label)
out.write_line('define void @%s (%%struct.VMState* nocapture %%state) nounwind {' % name)
out.indent()
for register in registers.values():
out.write_line('%%%s = getelementptr %%struct.VMState* %%state, i32 0, i32 0, i32 %s' % (register.register(), register.offset()))
out.write_line('%memory = getelementptr %struct.VMState* %state, i32 0, i32 1, i32 0')
func_out = LLVM_Function_Out(out)
while len(pending_labels) > 0:
sorted_labels = list(pending_labels)
            sorted_labels.sort(key=lambda x: -1 if isinstance(x, int) else self._label_map[x])
label = sorted_labels[0]
rendered_labels.add(label)
if isinstance(label, basestring):
index = self._label_map[label]
else:
index = label
pending_labels = pending_labels.union(self._to_llvm_block(index, func_out))
pending_labels = pending_labels.difference(rendered_labels)
out.write_line('')
out.write_line('ret void')
out.dedent()
out.write_line('}')
def to_llvm(self, out):
out.write_line('%struct.VMState = type { [11 x i16], [65536 x i16], [1024 x i8] }')
out.write_line('declare void @output(i16) nounwind')
out.write_line('declare void @debug(%struct.VMState* nocapture) nounwind')
out.write_line('declare void @memory_referenced(%struct.VMState* nocapture, i16) nounwind')
self._to_llvm_function('runMachine', 0, out)
for label in self._function_starts:
self._to_llvm_function(label, self._label_map[label], out)
class LLVM_Out(object):
def __init__(self, f):
        self._f = f
self._func_counter = 0
self._indent = ''
def write_line(self, s):
print >>self._f, self._indent + s
def indent(self):
self._indent += ' '
def dedent(self):
self._indent = self._indent[:-2]
def func(self):
result = '%%func%d' % self._func_counter
        self._func_counter += 1
return result
class LLVM_Function_Out(object):
def __init__(self, out):
        self._out = out
self._temp_counter = 0
self._label_counter = 0
def write_line(self, s):
self._out.write_line(s)
def indent(self):
self._out.indent()
def dedent(self):
self._out.dedent()
def temp_variable(self):
result = '%%tmp%d' % self._temp_counter
self._temp_counter += 1
return result
def label(self):
result = 'label%d' % self._label_counter
self._label_counter += 1
return result
class LLVM_Block_Out(object):
def __init__(self, out):
        self._out = out
self.reset_regs()
def reset_regs(self):
self._reg_vars = dict([(x, (False, '%%%s' % x)) for x in registers.keys()])
def write_line(self, s):
self._out.write_line(s)
def indent(self):
self._out.indent()
def dedent(self):
self._out.dedent()
def temp_variable(self):
return self._out.temp_variable()
def label(self):
return self._out.label()
def reg(self, register):
is_temp_var, var = self._reg_vars[register]
if not is_temp_var:
tmp = self.temp_variable()
self.write_line('%s = load i16* %s' % (tmp, var))
var = tmp
self._reg_vars[register] = (True, var)
return var
def set_reg(self, register, value):
self._reg_vars[register] = (True, value)
def dump_regs(self, include_PC = False):
for register, (is_temp_var, var) in self._reg_vars.items():
if register != 'PC' or include_PC:
if is_temp_var:
self.write_line('store i16 %s, i16* %%%s' % (var, register))
opcodes = {
'SET': SETOpcode(),
'OUT': OUTOpcode(),
'DBG': DBGOpcode(),
'SUB': SUBOpcode(),
'ADD': ADDOpcode(),
'MUL': MULOpcode(),
'DIV': DIVOpcode(),
'MOD': MODOpcode(),
'IFE': IFEOpcode(),
'IFN': IFNOpcode(),
'IFG': IFGOpcode(),
'IFB': IFBOpcode(),
'JSR': JSROpcode(),
'SHL': SHLOpcode(),
'SHR': SHROpcode(),
'AND': ANDOpcode(),
'OR': OROpcode(),
'XOR': XOROpcode(),
}
registers = {
'A': Register('A', 0),
'B': Register('B', 1),
'C': Register('C', 2),
'X': Register('X', 3),
'Y': Register('Y', 4),
'Z': Register('Z', 5),
'I': Register('I', 6),
'J': Register('J', 7),
'SP': Register('SP', 8),
'PC': Register('PC', 9),
'O': Register('O', 10),
}
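# The offsets above index the leading [11 x i16] register file of
# %struct.VMState (see Program.to_llvm): A-J occupy slots 0-7, followed by
# SP, PC and the overflow register O.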
ParserElement.setDefaultWhitespaceChars(' \t')
OPCODE = Or([Literal(x) for x in opcodes.keys()])
OPCODE.setParseAction(lambda s,l,t: opcodes[t[0]])
REGISTER = Or([Literal(x) for x in registers.keys()])
COMMENT = Suppress(';' + CharsNotIn('\n')*(0,1))
REGISTER_ARGUMENT = REGISTER
REGISTER_ARGUMENT.setParseAction(lambda s,l,t: registers[t[0]])
HEX_ARGUMENT = '0x' + Word('0123456789ABCDEF')
HEX_ARGUMENT.setParseAction(lambda s,l,t: Number(int(t[1], 16)))
DEC_ARGUMENT = Word(nums)
DEC_ARGUMENT.setParseAction(lambda s,l,t: Number(int(t[0])))
NUMERIC_ARGUMENT = HEX_ARGUMENT ^ DEC_ARGUMENT
ADD_ARGUMENT = NUMERIC_ARGUMENT + '+' + REGISTER
ADD_ARGUMENT.setParseAction(lambda s,l,t: Addition(t[0], t[2]))
BASIC_ARGUMENT = ADD_ARGUMENT ^ REGISTER ^ NUMERIC_ARGUMENT
DEREFERENCED_ARGUMENT = '[' + BASIC_ARGUMENT + ']'
DEREFERENCED_ARGUMENT.setParseAction(lambda s,l,t: Dereference(t[1]))
LABEL_ARGUMENT = Word(alphas)
LABEL_ARGUMENT.setParseAction(lambda s,l,t: Label(t[0]))
POP_ARGUMENT = Literal('POP')
POP_ARGUMENT.setParseAction(lambda s,l,t: Pop)
PEEK_ARGUMENT = Literal('PEEK')
PEEK_ARGUMENT.setParseAction(lambda s,l,t: Peek)
PUSH_ARGUMENT = Literal('PUSH')
PUSH_ARGUMENT.setParseAction(lambda s,l,t: Push)
ARGUMENT = DEREFERENCED_ARGUMENT ^ BASIC_ARGUMENT ^ \
POP_ARGUMENT ^ PEEK_ARGUMENT ^ PUSH_ARGUMENT ^ LABEL_ARGUMENT
LABEL = Word(':', alphas)
INSTRUCTION = LABEL*(0,1) + OPCODE + (ARGUMENT + (Suppress(',') + ARGUMENT)*(0,))*(0,1)
def make_instruction(s,l,t):
if isinstance(t[0], basestring):
return Instruction(t[0][1:], t[1], t[2:])
else:
return Instruction(None, t[0], t[1:])
INSTRUCTION.setParseAction(make_instruction)
LINE = INSTRUCTION*(0,1) + COMMENT*(0,1) + Suppress(Literal('\n'))
PROGRAM = LINE*(0,)
PROGRAM.setParseAction(lambda s,l,t: Program(t))
program = PROGRAM.parseFile(sys.stdin)
program[0].to_llvm(LLVM_Out(sys.stdout))
| {
"content_hash": "282f938ca6624d17dab8bdef4dd730eb",
"timestamp": "",
"source": "github",
"line_count": 898,
"max_line_length": 135,
"avg_line_length": 28.484409799554566,
"alnum_prop": 0.6029555494741781,
"repo_name": "sblom/dcpu16",
"id": "da011b93a1fcedc3e76cb8c7dda3c3b046d62d94",
"size": "25579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "translator/llvm/compile-dcpu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "973"
},
{
"name": "C#",
"bytes": "13195"
},
{
"name": "Python",
"bytes": "15819"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from pycrunchbase import Relationship
PAST_TEAM_RELATIONSHIP = {
"cardinality": "OneToMany",
"paging": {
"total_items": 3,
"first_page_url": "https://api.crunchbase.com/v3.1/"
"organization/example/past_team",
"sort_order": "created_at DESC"
},
"items": [
{
"type": "Job",
"uuid": "558bac9a0e484b1478762b6e32b66aaa",
"properties": {
"title": "Co-Founder / President",
"started_on": "2008-07-01",
"started_on_trust_code": 6,
"ended_on": None,
"ended_on_trust_code": None,
"created_at": 1401278974,
"updated_at": 1437762874
},
"relationships": {
"person": {
"type": "Person",
"uuid": "e37bfcba7c041eb29abf404725cf9fc9",
"properties": {
"permalink": "tom-preston-werner",
"api_path": "people/tom-preston-werner",
"web_path": "person/tom-preston-werner",
"first_name": "Tom",
"last_name": "Preston-Werner",
"also_known_as": None,
"bio": "Tom Preston-Werner is a software developer and entrepreneur who co-founded GitHub in 2008, along with Chris Wanstrath and PJ Hyett, to simplify sharing code and make it easy to collaborate on building software. Today, GitHub is the largest code host in the world, with a community of four million people building software together.\r\n\r\nBefore founding GitHub, Tom worked as a Ruby developer for Powerset, a Wikipedia search engine that was acquired by Microsoft. Additionally, Tom invented Gravatar, a service for providing unique avatars that follow you from site to site, which he sold to Automattic in 2007.\r\n\r\nTom grew up in Iowa and came to the west coast to study physics at Harvey Mudd College; he left after two years when he realized that he enjoyed programming far more than the math that was the core of his physics studies. He currently lives in San Francisco with his wife and son.",
"role_investor": True,
"born_on": None,
"born_on_trust_code": None,
"died_on": None,
"died_on_trust_code": 0,
"created_at": 1208251918,
"updated_at": 1443777328
}
}
}
}
]
}
HEADQUARTERS_RELATIONSHIP = {
"cardinality": "OneToOne",
"item": {
"type": "Address",
"uuid": "31adbab5e90fc45a47ae873e0656fadd",
"properties": {
"name": "Headquarters",
"street_1": "1601 Willow Road",
"street_2": None,
"postal_code": "94025",
"city": "Menlo Park",
"city_web_path": "location/menlo-park/"
"1f8abfef5379b26b702005a09908492f",
"region": "California",
"region_web_path": "location/california/"
"eb879a83c91a121e0bb8829782dbcf04",
"country": "United States",
"country_web_path": "location/united-states/"
"f110fca2105599f6996d011c198b3928",
"latitude": 37.41605,
"longitude": -122.151801,
"created_at": 1205362453,
"updated_at": 1398138077
}
}
}
INVESTMENTS_RELATIONSHIP = {
"cardinality": "OneToMany",
"paging": {
"total_items": 4,
"first_page_url": "https://api.crunchbase.com/v3.1/funding-rounds/49182d090879aebb464ac8ed65ccb936/investments",
"sort_order": "created_at DESC"
},
"items": [
{
"type": "Investment",
"uuid": "c04e0510e80ced0708d4a3490c60cd22",
"properties": {
"money_invested": None,
"money_invested_currency_code": None,
"money_invested_usd": None,
"created_at": 1438215743,
"updated_at": 1438273006
},
"relationships": {
"investors": [
{
"type": "Organization",
"uuid": "a2281da98a3eda3d56b2b8e0725b1b51",
"properties": {
"permalink": "institutional-venture-partners",
"api_path": "organizations/institutional-venture-partners",
"name": "Investor"
}
}]
}
}]
}
class RelationshipTestCase(TestCase):
def test_one_to_many_relationship(self):
past_team = Relationship('past_team', PAST_TEAM_RELATIONSHIP)
job = past_team.get(0)
self.assertEqual(job.title, 'Co-Founder / President')
self.assertEqual(job.person.first_name, 'Tom')
self.assertEqual('past_team', past_team.name)
def test_one_to_one_relationship(self):
hq = Relationship('headquarters', HEADQUARTERS_RELATIONSHIP)
self.assertEqual(hq.name, "Headquarters")
def test_nested_relationships(self):
        r = Relationship('investments', INVESTMENTS_RELATIONSHIP)
self.assertEqual(r[0].investors[0].name, 'Investor')
| {
"content_hash": "b2aa78a3c8db4a5bb329087019792704",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 935,
"avg_line_length": 42.51968503937008,
"alnum_prop": 0.5296296296296297,
"repo_name": "ngzhian/pycrunchbase",
"id": "8c4b27091ee1d4428953e89bb6e19191121d7588",
"size": "5400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_relationship.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1489"
},
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "114393"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines interface ISIS level state information.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__level_number",
"__passive",
"__priority",
"__enabled",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__level_number = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..2"]},
),
is_leaf=True,
yang_name="level-number",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:level-number",
is_config=False,
)
self.__passive = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="passive",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0 .. 127"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"levels",
"level",
"state",
]
def _get_level_number(self):
"""
Getter method for level_number, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/level_number (oc-isis-types:level-number)
        YANG Description: ISIS level number (level-1, level-2).
"""
return self.__level_number
def _set_level_number(self, v, load=False):
"""
Setter method for level_number, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/level_number (oc-isis-types:level-number)
If this variable is read-only (config: false) in the
source YANG file, then _set_level_number is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_level_number() directly.
        YANG Description: ISIS level number (level-1, level-2).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..2"]},
),
is_leaf=True,
yang_name="level-number",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:level-number",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """level_number must be of a type compatible with oc-isis-types:level-number""",
"defined-type": "oc-isis-types:level-number",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..2']}), is_leaf=True, yang_name="level-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:level-number', is_config=False)""",
}
)
self.__level_number = t
if hasattr(self, "_set"):
self._set()
def _unset_level_number(self):
self.__level_number = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..2"]},
),
is_leaf=True,
yang_name="level-number",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:level-number",
is_config=False,
)
def _get_passive(self):
"""
Getter method for passive, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/passive (boolean)
YANG Description: ISIS passive interface admin enable/disable function.
"""
return self.__passive
def _set_passive(self, v, load=False):
"""
Setter method for passive, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/passive (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_passive is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_passive() directly.
YANG Description: ISIS passive interface admin enable/disable function.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="passive",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """passive must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="passive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__passive = t
if hasattr(self, "_set"):
self._set()
def _unset_passive(self):
self.__passive = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="passive",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_priority(self):
"""
Getter method for priority, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/priority (uint8)
        YANG Description: ISIS neighbor priority (LAN hello PDU only).
"""
return self.__priority
def _set_priority(self, v, load=False):
"""
Setter method for priority, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/priority (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly.
        YANG Description: ISIS neighbor priority (LAN hello PDU only).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0 .. 127"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """priority must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0 .. 127']}), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__priority = t
if hasattr(self, "_set"):
self._set()
def _unset_priority(self):
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0 .. 127"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/enabled (boolean)
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
level_number = __builtin__.property(_get_level_number)
passive = __builtin__.property(_get_passive)
priority = __builtin__.property(_get_priority)
enabled = __builtin__.property(_get_enabled)
_pyangbind_elements = OrderedDict(
[
("level_number", level_number),
("passive", passive),
("priority", priority),
("enabled", enabled),
]
)
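# A minimal usage sketch (hypothetical values; the leaves here are
# config: false, so backends populate them via the private setters):
#   s = state()
#   s._set_level_number(1)
#   s._set_priority(64)
#   assert s.level_number == 1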
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines interface ISIS level state information.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__level_number",
"__passive",
"__priority",
"__enabled",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__level_number = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..2"]},
),
is_leaf=True,
yang_name="level-number",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:level-number",
is_config=False,
)
self.__passive = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="passive",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0 .. 127"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"levels",
"level",
"state",
]
def _get_level_number(self):
"""
Getter method for level_number, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/level_number (oc-isis-types:level-number)
        YANG Description: ISIS level number (level-1, level-2).
"""
return self.__level_number
def _set_level_number(self, v, load=False):
"""
Setter method for level_number, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/level_number (oc-isis-types:level-number)
If this variable is read-only (config: false) in the
source YANG file, then _set_level_number is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_level_number() directly.
        YANG Description: ISIS level number (level-1, level-2).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..2"]},
),
is_leaf=True,
yang_name="level-number",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:level-number",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """level_number must be of a type compatible with oc-isis-types:level-number""",
"defined-type": "oc-isis-types:level-number",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..2']}), is_leaf=True, yang_name="level-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:level-number', is_config=False)""",
}
)
self.__level_number = t
if hasattr(self, "_set"):
self._set()
def _unset_level_number(self):
self.__level_number = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..2"]},
),
is_leaf=True,
yang_name="level-number",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:level-number",
is_config=False,
)
def _get_passive(self):
"""
Getter method for passive, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/passive (boolean)
YANG Description: ISIS passive interface admin enable/disable function.
"""
return self.__passive
def _set_passive(self, v, load=False):
"""
Setter method for passive, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/passive (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_passive is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_passive() directly.
YANG Description: ISIS passive interface admin enable/disable function.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="passive",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """passive must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="passive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__passive = t
if hasattr(self, "_set"):
self._set()
def _unset_passive(self):
self.__passive = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="passive",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_priority(self):
"""
Getter method for priority, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/priority (uint8)
YANG Description: ISIS neighbor priority(LAN hello PDU only).
"""
return self.__priority
def _set_priority(self, v, load=False):
"""
Setter method for priority, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/priority (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly.
YANG Description: ISIS neighbor priority(LAN hello PDU only).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0 .. 127"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """priority must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0 .. 127']}), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__priority = t
if hasattr(self, "_set"):
self._set()
def _unset_priority(self):
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0 .. 127"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/enabled (boolean)
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
level_number = __builtin__.property(_get_level_number)
passive = __builtin__.property(_get_passive)
priority = __builtin__.property(_get_priority)
enabled = __builtin__.property(_get_enabled)
_pyangbind_elements = OrderedDict(
[
("level_number", level_number),
("passive", passive),
("priority", priority),
("enabled", enabled),
]
)
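# Illustrative sketch only (this is a generated file; the class name `state`
# is inferred from the module path above and is an assumption): because every
# leaf here is config false, the public properties are read-only and a backend
# populates them through the private setters described in the docstrings, e.g.
#
#   st = state()
#   st._set_level_number(1)
#   st._set_passive(True)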
| {
"content_hash": "fc06d08d7848e92e087d018214be92d3",
"timestamp": "",
"source": "github",
"line_count": 875,
"max_line_length": 514,
"avg_line_length": 40.990857142857145,
"alnum_prop": 0.5647531156773636,
"repo_name": "napalm-automation/napalm-yang",
"id": "d30f3d3cd5a99c2c96f494772f97ecb8bab58cec",
"size": "35891",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/state/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
import discord
import logging
import time
import datetime
import youtube_dl
from discord.ext import commands
from settings import token
logging.basicConfig(level=logging.INFO)
startuptime = int(time.time())
class YoutubeSource(discord.FFmpegPCMAudio):
def __init__(self, url):
opts = {
'format': 'webm[abr>0]/bestaudio/best',
'prefer_ffmpeg': True,
'quiet': True
}
ytdl = youtube_dl.YoutubeDL(opts)
info = ytdl.extract_info(url, download=False)
super().__init__(info['url'])
class MyClient(commands.AutoShardedBot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
extensions = ["cogs"]
# extensions could be:
# extensions = ["cogs.bla", "cogs.boop", "myfile"]
for ext in extensions:
try:
self.load_extension(ext)
except Exception as e:
print(f"{ext} could not be loaded: {e.__class__.__name__}: {e}")
async def on_ready(self):
print('Logged in as')
print(self.user.name)
print(self.user.id)
print('------')
# with open("avatar.png", "rb") as f:
# avatar = f.read()
# await client.user.edit(username="BoxBot", avatar=avatar)
        # this is also not necessary; it just wastes bandwidth each time you start with that photo
async def on_message(self, message):
# we do not want the bot to reply to itself
if message.author.id == self.user.id:
return
if message.content.startswith(f"{self.user.mention} uptime"):
uptime = int(time.time()) - startuptime
divider = 60
            minutes, seconds = divmod(uptime, divider)
            await message.channel.send(f"Uptime: {minutes} minutes, {seconds} seconds")
if message.content.startswith(f"{self.user.mention} play "):
message2 = message.content.replace(f"{self.user.mention} play ", "")
source = YoutubeSource(message2)
            voice_client = await self.get_channel(264541926378831872).connect()
voice_client.play(source)
await self.process_commands(message)
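# Illustrative only: with command_prefix=commands.when_mentioned, the handlers
# above are triggered by mentioning the bot, e.g. "@BoxBot uptime" or
# "@BoxBot play <youtube-url>".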
client = MyClient(command_prefix=commands.when_mentioned, game=discord.Game(name='Currently under development'))
client.run(token)
| {
"content_hash": "bfa69965d2bd0e805e607670f150fef3",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 112,
"avg_line_length": 33.23943661971831,
"alnum_prop": 0.5830508474576271,
"repo_name": "Paarf/BoxBot",
"id": "3541d02b7272fec54ed951067a63f01fd634f313",
"size": "2360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Robut.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4204"
}
],
"symlink_target": ""
} |
from provstore.bundle import Bundle
class BundleManager(object):
"""
A document's bundle manager.
This is an iterable and will iterate through all of a document's bundles.
.. note::
Iteration is expensive, consider using :py:class:`provstore.document.Document.prov.bundles` instead!
Example getting and adding bundles:
>>> api = Api()
>>> api.document.create(prov_document, name="name")
>>> api.bundles
A BundleManager object for this document
>>> api.bundles['ex:bundle']
A Bundle with the identifier given (if exists)
>>> api.bundles['ex:new_bundle'] = prov_bundle
Saves a new bundle with the identifier specified
"""
def __init__(self, api, document):
self._api = api
self._document = document
self._bundles = None
def __getitem__(self, key):
if not self._bundles:
self.refresh()
if key not in self._bundles:
from provstore.api import NotFoundException
raise NotFoundException()
return self._bundles[key]
def __setitem__(self, key, prov_bundle):
self._document.add_bundle(prov_bundle, key)
def __iter__(self):
if not self._bundles:
self.refresh()
        return iter(self._bundles.values())  # works on both Python 2 and 3
def __len__(self):
if self._bundles:
return len(self._bundles)
else:
return 0
def refresh(self):
"""
Reload list of bundles from the store
:return: self
"""
self._bundles = {}
bundles = self._api.get_bundles(self._document.id)
for bundle in bundles:
self._bundles[bundle['identifier']] = Bundle(self._api, self._document, bundle)
return self
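# Illustrative sketch (the `bm` name is an assumption, not part of this
# module): the mapping protocol above gives lazy, dict-like access:
#
#   bundle = bm['ex:bundle']           # refreshes from the store on first use
#   bm['ex:new_bundle'] = prov_bundle  # delegates to document.add_bundle
#   count = len(bm)                    # 0 until the bundle list is loaded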
| {
"content_hash": "3decb338cbeb75c59686ac105cc6305d",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 107,
"avg_line_length": 27.12121212121212,
"alnum_prop": 0.587709497206704,
"repo_name": "millar/provstore-api",
"id": "308e9567608d33c0b6ae6736260ff0cff14e55c3",
"size": "1790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "provstore/bundle_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37392"
},
{
"name": "Shell",
"bytes": "6717"
}
],
"symlink_target": ""
} |
"""
'E1102': ('%s is not callable',
'Used when an object being called has been infered to a non \
callable object'),
"""
__revision__ = None
__revision__()
def correct():
"""callable object"""
return 1
__revision__ = correct()
class Correct(object):
"""callable object"""
class MetaCorrect(object):
"""callable object"""
def __call__(self):
return self
INSTANCE = Correct()
CALLABLE_INSTANCE = MetaCorrect()
CORRECT = CALLABLE_INSTANCE()
INCORRECT = INSTANCE()
LIST = []
INCORRECT = LIST()
DICT = {}
INCORRECT = DICT()
TUPLE = ()
INCORRECT = TUPLE()
INT = 1
INCORRECT = INT()
# Test calling properties. Pylint can detect when using only the
# getter, but it doesn't infer properly when having a getter
# and a setter.
class MyProperty(property):
""" test subclasses """
class PropertyTest(object):
""" class """
def __init__(self):
self.attr = 4
@property
def test(self):
""" Get the attribute """
return self.attr
@test.setter
def test(self, value):
""" Set the attribute """
self.attr = value
@MyProperty
def custom(self):
""" Get the attribute """
return self.attr
@custom.setter
def custom(self, value):
""" Set the attribute """
self.attr = value
PROP = PropertyTest()
PROP.test(40)
PROP.custom()
# Safe from not-callable when using properties.
class SafeProperty(object):
@property
def static(self):
return staticmethod
@property
def klass(self):
return classmethod
@property
def get_lambda(self):
return lambda: None
@property
def other_function(self):
def function(arg):
return arg
return function
@property
def dict_builtin(self):
return dict
@property
def range_builtin(self):
return range
@property
def instance(self):
class Empty(object):
def __call__(self):
return 42
return Empty()
PROP1 = SafeProperty()
PROP1.static(2)
PROP1.klass(2)
PROP1.get_lambda()
PROP1.other_function(4)
PROP1.dict_builtin()
PROP1.range_builtin(4)
PROP1.instance()
| {
"content_hash": "311830c956eeec36eed4a555527d0dbe",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 75,
"avg_line_length": 18.99145299145299,
"alnum_prop": 0.5999099909990999,
"repo_name": "willemneal/Docky",
"id": "832657d2444b944e5fe32e3a152049417d1ce1d0",
"size": "2276",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/pylint/test/input/func_typecheck_non_callable_call.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "636"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "5145"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "3168"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "4528"
},
{
"name": "BlitzBasic",
"bytes": "1730"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "109073"
},
{
"name": "C#",
"bytes": "17784"
},
{
"name": "C++",
"bytes": "79372"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "26952"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "2878"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "91743"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "5475"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "5709"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "27879"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "15760"
},
{
"name": "Gnuplot",
"bytes": "10376"
},
{
"name": "Go",
"bytes": "172"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "81613"
},
{
"name": "JavaScript",
"bytes": "14143"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "306"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "76274"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "3385"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "318"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "17354"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "84519"
},
{
"name": "Perl",
"bytes": "3611"
},
{
"name": "Perl6",
"bytes": "49676"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PowerShell",
"bytes": "6932"
},
{
"name": "Prolog",
"bytes": "738"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "6272729"
},
{
"name": "R",
"bytes": "4057"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "1887"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "47137"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "121510"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smalltalk",
"bytes": "156665"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "16922"
},
{
"name": "Visual Basic",
"bytes": "17210"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XSLT",
"bytes": "755"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "26388"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
from typing import Dict, Optional, Sequence
import libcst as cst
from libcst._nodes.internal import CodegenState, visit_required
def make_aref(name: str, idx: cst.BaseExpression) -> cst.Subscript:
sub_elt = cst.SubscriptElement(slice=cst.Index(value=idx))
return cst.Subscript(value=cst.Name(name), slice=[sub_elt])
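# For illustration only: make_aref('GPRs', cst.Name('grd')) builds a Subscript
# node that renders as "GPRs[grd]" once code is generated for it.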
class NBAssignTarget(cst.AssignTarget):
'''The target of a (delayed) state update'''
def _codegen_impl(self, state: CodegenState) -> None:
with state.record_syntactic_position(self):
self.target._codegen(state)
self.whitespace_before_equal._codegen(state)
# U+21D0 is "Leftwards Double Arrow" (a nice unicode rendering of
# SystemVerilog's "<=" which doesn't collide with less-than-or-equal.
state.add_token("\u21d0")
self.whitespace_after_equal._codegen(state)
class NBAssign(cst.BaseSmallStatement):
'''An assignment statement that models a (delayed) state update'''
def __init__(self, target: NBAssignTarget, value: cst.BaseExpression):
super().__init__()
self.target = target
self.value = value
def _visit_and_replace_children(self,
visitor: cst.CSTVisitorT) -> "NBAssign":
target = visit_required(self, "target", self.target, visitor)
value = visit_required(self, "value", self.value, visitor)
return NBAssign(target=target, value=value)
def _codegen_impl(self,
state: CodegenState,
default_semicolon: bool = False) -> None:
with state.record_syntactic_position(self):
self.target._codegen(state)
self.value._codegen(state)
@staticmethod
def make(lhs: cst.BaseAssignTargetExpression,
rhs: cst.BaseExpression) -> 'NBAssign':
return NBAssign(target=NBAssignTarget(target=lhs),
value=rhs)
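# For illustration only: code generated for
# NBAssign.make(cst.Name('PC'), cst.Name('next_pc')) reads "PC ⇐ next_pc",
# with the arrow rendered as the U+21D0 token emitted by NBAssignTarget above.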
class ImplTransformer(cst.CSTTransformer):
'''An AST visitor used to extract documentation from the ISS'''
def __init__(self) -> None:
self.impls = {} # type: Dict[str, Sequence[cst.BaseStatement]]
self.cur_class = None # type: Optional[str]
def visit_ClassDef(self, node: cst.ClassDef) -> Optional[bool]:
assert self.cur_class is None
self.cur_class = node.name.value
return None
def leave_ClassDef(self,
orig: cst.ClassDef,
updated: cst.ClassDef) -> cst.BaseStatement:
self.cur_class = None
return updated
def leave_Attribute(self,
orig: cst.Attribute,
updated: cst.Attribute) -> cst.BaseExpression:
if isinstance(updated.value, cst.Name):
stem = updated.value.value
# Strip out "self." references. In the ISS code, a field in the
# instruction appears as self.field_name. In the documentation, we
# can treat all the instruction fields as being in scope.
if stem == 'self':
return updated.attr
# Replace state.dmem with DMEM. This is an object in the ISS code,
# so you see things like state.dmem.load_u32(...). We keep the
# "object-orientated" style (so DMEM.load_u32(...)) because we need
# to distinguish between 32-bit and 256-bit loads.
if stem == 'state' and updated.attr.value == 'dmem':
return cst.Name(value='DMEM')
if isinstance(updated.value, cst.Attribute):
# This attribute looks like A.B.C where B, C are names and A may be
# a further attribute or it might be a name.
attr_a = updated.value.value
attr_b = updated.value.attr.value
attr_c = updated.attr.value
if isinstance(attr_a, cst.Name):
stem = attr_a.value
# Replace state.csrs.flags with FLAGs: the flag groups are
# stored in the CSRs in the ISS and the implementation, but
# logically exist somewhat separately, so we want named
# reads/writes from them to look different.
if (stem, attr_b, attr_c) == ('state', 'csrs', 'flags'):
return cst.Name(value='FLAGs')
return updated
def leave_FunctionDef(self,
orig: cst.FunctionDef,
updated: cst.FunctionDef) -> cst.BaseStatement:
if ((self.cur_class is None or
updated.name.value != 'execute' or
self.cur_class in self.impls)):
return updated
# The body of a function definition is always an IndentedBlock. Strip
# that out to get at the statements inside.
assert isinstance(updated.body, cst.IndentedBlock)
self.impls[self.cur_class] = updated.body.body
return updated
@staticmethod
def match_get_reg(call: cst.BaseExpression) -> Optional[cst.Subscript]:
'''Extract a RegRef from state.gprs.get_reg(foo)
Returns None if this isn't a match.
'''
if not isinstance(call, cst.Call):
return None
# We expect a single argument (which we take as the index)
if len(call.args) != 1:
return None
getreg_idx = call.args[0].value
# All we need to do still is check that call.func is an
# attribute representing state.gprs.get_reg or state.wdrs.get_reg.
if ((not isinstance(call.func, cst.Attribute) or
call.func.attr.value != 'get_reg')):
return None
state_dot_reg = call.func.value
if not isinstance(state_dot_reg, cst.Attribute):
return None
# Finally, state_dot_reg should be either state.gprs or state.wdrs
if not (isinstance(state_dot_reg.value, cst.Name) and
state_dot_reg.value.value == 'state'):
return None
regfile_name = state_dot_reg.attr.value
if regfile_name == 'gprs':
regfile_uname = 'GPRs'
elif regfile_name == 'wdrs':
regfile_uname = 'WDRs'
else:
return None
return make_aref(regfile_uname, getreg_idx)
@staticmethod
def _spot_reg_read(node: cst.Call) -> Optional[cst.BaseExpression]:
# Detect
#
# state.gprs.get_reg(FOO).read_unsigned()
# state.gprs.get_reg(FOO).read_signed()
#
# and replace with the expressions
#
# GPRs[FOO]
# from_2s_complement(GPRs[FOO])
#
# respectively.
# In either case, we expect node.func to be some long attribute
# (representing state.gprs.get_reg(FOO).read_X). For unsigned or
# signed, we can check that it is indeed an Attribute and that
# node.args is empty (neither function takes arguments).
if node.args or not isinstance(node.func, cst.Attribute):
return None
# Now, check whether we're calling one of the functions we're
# interested in.
if node.func.attr.value == 'read_signed':
signed = True
elif node.func.attr.value == 'read_unsigned':
signed = False
else:
return None
# Check that node.func.value really does represent something of the
# form "state.gprs.get_reg(FOO)".
ret = ImplTransformer.match_get_reg(node.func.value)
if ret is None:
return None
if signed:
# If this is a call to read_signed, we want to wrap the returned
# value in a call to a fake sign decode function.
return cst.Call(func=cst.Name('from_2s_complement'),
args=[cst.Arg(value=ret)])
else:
return ret
@staticmethod
def _spot_csr_read(node: cst.Call) -> Optional[cst.BaseExpression]:
# Detect
#
# state.read_csr(FOO)
#
# and replace it with the expression
#
# CSRs[FOO]
# Check we have exactly one argument
if len(node.args) != 1:
return None
# Check this is state.read_csr
if not (isinstance(node.func, cst.Attribute) and
isinstance(node.func.value, cst.Name) and
node.func.value.value == 'state' and
node.func.attr.value == 'read_csr'):
return None
return make_aref('CSRs', node.args[0].value)
@staticmethod
def _spot_wsr_read_idx(node: cst.Call) -> Optional[cst.BaseExpression]:
# Detect
#
# state.wsrs.read_at_idx(FOO)
#
# and replace it with the expression
#
# WSRs[FOO]
# Check we have exactly one argument
if len(node.args) != 1:
return None
# Check this is state.wsrs.read_at_idx
if not (isinstance(node.func, cst.Attribute) and
isinstance(node.func.value, cst.Attribute) and
isinstance(node.func.value.value, cst.Name) and
node.func.value.value.value == 'state' and
node.func.value.attr.value == 'wsrs' and
node.func.attr.value == 'read_at_idx'):
return None
return make_aref('WSRs', node.args[0].value)
@staticmethod
def _spot_wsr_read_name(node: cst.Call) -> Optional[cst.BaseExpression]:
# Detect
#
# state.wsrs.FOO.read_unsigned()
#
# and replace it with the expression
#
# FOO
# Check we have no arguments
if len(node.args) != 0:
return None
# Check this is A.B.C.D for names A, B, C, D.
if not (isinstance(node.func, cst.Attribute) and
isinstance(node.func.value, cst.Attribute) and
isinstance(node.func.value.value, cst.Attribute) and
isinstance(node.func.value.value.value, cst.Name)):
return None
a_name = node.func.value.value.value
b_name = node.func.value.value.attr
c_name = node.func.value.attr
d_name = node.func.attr
if not (a_name.value == 'state' and
b_name.value == 'wsrs' and
d_name.value == 'read_unsigned'):
return None
return c_name
def leave_Call(self,
orig: cst.Call,
updated: cst.Call) -> cst.BaseExpression:
# Handle:
#
# state.gprs.get_reg(FOO).read_unsigned()
# state.gprs.get_reg(FOO).read_signed()
#
reg_read = ImplTransformer._spot_reg_read(updated)
if reg_read is not None:
return reg_read
csr_read = ImplTransformer._spot_csr_read(updated)
if csr_read is not None:
return csr_read
wsr_read_idx = ImplTransformer._spot_wsr_read_idx(updated)
if wsr_read_idx is not None:
return wsr_read_idx
wsr_read_name = ImplTransformer._spot_wsr_read_name(updated)
if wsr_read_name is not None:
return wsr_read_name
return updated
@staticmethod
def _spot_reg_write(node: cst.Expr) -> Optional[NBAssign]:
# Spot
#
# state.gprs.get_reg(foo).write_unsigned(bar)
# state.gprs.get_reg(foo).write_signed(bar)
#
# and turn them into
#
# GPRs[FOO] = bar
# GPRs[FOO] = to_2s_complement(bar)
if not isinstance(node.value, cst.Call):
return None
call = node.value
if len(call.args) != 1 or not isinstance(call.func, cst.Attribute):
return None
value = call.args[0].value
if call.func.attr.value == 'write_unsigned':
rhs = value
elif call.func.attr.value == 'write_signed':
rhs = cst.Call(func=cst.Name('to_2s_complement'),
args=[cst.Arg(value=value)])
else:
return None
# We expect call.func.value to be match state.gprs.get_reg(foo).
# Extract the array reference if we can.
reg_ref = ImplTransformer.match_get_reg(call.func.value)
if reg_ref is None:
return None
return NBAssign.make(reg_ref, rhs)
@staticmethod
def _spot_csr_write(node: cst.Expr) -> Optional[NBAssign]:
# Spot
#
# state.write_csr(csr, new_val)
#
# and turn it into
#
# CSRs[csr] = new_val
if not isinstance(node.value, cst.Call):
return None
call = node.value
if len(call.args) != 2 or not isinstance(call.func, cst.Attribute):
return None
if not (isinstance(call.func.value, cst.Name) and
call.func.value.value == 'state' and
call.func.attr.value == 'write_csr'):
return None
idx = call.args[0].value
rhs = call.args[1].value
return NBAssign.make(make_aref('CSRs', idx), rhs)
@staticmethod
def _spot_wsr_write_idx(node: cst.Expr) -> Optional[NBAssign]:
# Spot
#
# state.wsrs.write_at_idx(wsr, new_val)
#
# and turn it into
#
# WSRs[wsr] = new_val
if not isinstance(node.value, cst.Call):
return None
call = node.value
if len(call.args) != 2 or not isinstance(call.func, cst.Attribute):
return None
func = call.func
if not (isinstance(func.value, cst.Attribute) and
isinstance(func.value.value, cst.Name) and
func.value.value.value == 'state' and
func.value.attr.value == 'wsrs' and
func.attr.value == 'write_at_idx'):
return None
idx = call.args[0].value
rhs = call.args[1].value
return NBAssign.make(make_aref('WSRs', idx), rhs)
@staticmethod
def _spot_wsr_write_name(node: cst.Expr) -> Optional[NBAssign]:
# Spot
#
# state.wsrs.FOO.write_unsigned(new_val)
#
# and turn it into
#
# FOO = new_val
if not isinstance(node.value, cst.Call):
return None
call = node.value
if len(call.args) != 1 or not isinstance(call.func, cst.Attribute):
return None
# Check this is A.B.C.D for names A, B, C, D.
if not (isinstance(call.func, cst.Attribute) and
isinstance(call.func.value, cst.Attribute) and
isinstance(call.func.value.value, cst.Attribute) and
isinstance(call.func.value.value.value, cst.Name)):
return None
a_name = call.func.value.value.value
b_name = call.func.value.value.attr
c_name = call.func.value.attr
d_name = call.func.attr
if not (a_name.value == 'state' and
b_name.value == 'wsrs' and
d_name.value == 'write_unsigned'):
return None
rhs = call.args[0].value
return NBAssign.make(c_name, rhs)
@staticmethod
def _spot_flag_write(node: cst.Expr) -> Optional[NBAssign]:
# Spot
#
# state.set_flags(fg, flags)
#
# and turn it into
#
# FLAGs[fg] = flags
if not isinstance(node.value, cst.Call):
return None
call = node.value
if len(call.args) != 2 or not isinstance(call.func, cst.Attribute):
return None
if not (isinstance(call.func.value, cst.Name) and
call.func.value.value == 'state' and
call.func.attr.value == 'set_flags'):
return None
fg = call.args[0].value
flags = call.args[1].value
return NBAssign.make(make_aref('FLAGs', fg), flags)
@staticmethod
def _spot_set_next_pc(node: cst.Expr) -> Optional[NBAssign]:
# Spot
#
# state.set_next_pc(next_pc)
#
# and turn it into
#
# PC <= next_pc
if not isinstance(node.value, cst.Call):
return None
call = node.value
if len(call.args) != 1 or not isinstance(call.func, cst.Attribute):
return None
if not (isinstance(call.func.value, cst.Name) and
call.func.value.value == 'state' and
call.func.attr.value == 'set_next_pc'):
return None
next_pc = call.args[0].value
return NBAssign.make(cst.Name(value='PC'), next_pc)
def leave_Expr(self,
orig: cst.Expr,
updated: cst.Expr) -> cst.BaseSmallStatement:
reg_write = ImplTransformer._spot_reg_write(updated)
if reg_write is not None:
return reg_write
csr_write = ImplTransformer._spot_csr_write(updated)
if csr_write is not None:
return csr_write
wsr_write_idx = ImplTransformer._spot_wsr_write_idx(updated)
if wsr_write_idx is not None:
return wsr_write_idx
wsr_write_name = ImplTransformer._spot_wsr_write_name(updated)
if wsr_write_name is not None:
return wsr_write_name
flag_write = ImplTransformer._spot_flag_write(updated)
if flag_write is not None:
return flag_write
set_pc_next = ImplTransformer._spot_set_next_pc(updated)
if set_pc_next is not None:
return set_pc_next
return updated
def read_implementation(path: str) -> Dict[str, str]:
'''Read the implementation at path (probably insn.py)
Returns a dictionary from instruction class name to its pseudo-code
implementation. An instruction class name looks like ADDI (for addi) or
BNADDM (for bn.addm).
'''
with open(path, 'r') as handle:
node = cst.parse_module(handle.read())
# Extract the function bodies
visitor = ImplTransformer()
node.visit(visitor)
# Render the function bodies
return {cls: ''.join(node.code_for_node(stmt) for stmt in body)
for cls, body in visitor.impls.items()}
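# Sketch of typical use (the path is an assumption, not taken from this file):
#
#   impls = read_implementation('sim/insn.py')
#   print(impls['ADDI'])  # pseudo-code with GPRs[...] reads and "⇐" updates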
| {
"content_hash": "9e658bad0e41b75b4e4c47862672649c",
"timestamp": "",
"source": "github",
"line_count": 548,
"max_line_length": 79,
"avg_line_length": 33.15510948905109,
"alnum_prop": 0.5652485001926358,
"repo_name": "lowRISC/opentitan",
"id": "b26bcd8c556b8e9353e8adcc4b1fbd5b07823f1b",
"size": "18317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hw/ip/otbn/util/docs/get_impl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "516881"
},
{
"name": "C",
"bytes": "4864968"
},
{
"name": "C++",
"bytes": "1629214"
},
{
"name": "CSS",
"bytes": "3281"
},
{
"name": "Dockerfile",
"bytes": "6732"
},
{
"name": "Emacs Lisp",
"bytes": "411542"
},
{
"name": "HTML",
"bytes": "149270"
},
{
"name": "Makefile",
"bytes": "20646"
},
{
"name": "Python",
"bytes": "2576872"
},
{
"name": "Rust",
"bytes": "856480"
},
{
"name": "SCSS",
"bytes": "54700"
},
{
"name": "Shell",
"bytes": "119163"
},
{
"name": "Smarty",
"bytes": "771102"
},
{
"name": "Starlark",
"bytes": "688003"
},
{
"name": "Stata",
"bytes": "3676"
},
{
"name": "SystemVerilog",
"bytes": "14853322"
},
{
"name": "Tcl",
"bytes": "361936"
},
{
"name": "Verilog",
"bytes": "3296"
}
],
"symlink_target": ""
} |
import os.path
from twisted.internet.protocol import Factory
from twisted.logger import Logger
import sqlite3
from .game import Game
from .protocol import ServerProtocol
from . import version
from shared.card_database_manager import CardDatabaseManager
from shared.path import getScriptDirectory
class ServerFactory(Factory):
black_cards = -1
card_database = None
games = []
log = Logger()
serverDatabase = None
    users = []
def __init__(self, black_cards, database_file):
self.black_cards = black_cards
self.database_file = database_file
def buildProtocol(self, addr):
return ServerProtocol(self)
def openServerDatabase(self):
self.serverDatabase = sqlite3.connect(os.path.join(getScriptDirectory(), "server.db"))
cursor = self.serverDatabase.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS 'users' ('id' INTEGER PRIMARY KEY, 'name' VARCHAR(30), 'password' CHAR(128))")
cursor.execute("CREATE TABLE IF NOT EXISTS 'games' ('id' CHAR(32), 'name' VARCHAR(30), 'users' TEXT, 'cards' TEXT, 'password_hash' CHAR(128), 'database_hash' CHAR(128), 'server_version_major' TINYINT, 'server_version_minor' TINYINT, 'server_version_revision' TINYINT)")
self.serverDatabase.commit()
self.log.info("Loaded server database")
def loadGames(self):
cursor = self.serverDatabase.cursor()
cursor.execute('SELECT * FROM games where database_hash = ? and server_version_major = ? and server_version_minor = ?', (self.card_database.hash, version.MAJOR, version.MINOR, ))
game_rows = cursor.fetchall()
for row in game_rows:
game = {}
for colid in range(len(row)):
game[cursor.description[colid][0]] = row[colid]
game = Game.load(self, **game)
self.games.append(game)
cursor.execute('DELETE FROM games')
cursor.execute('VACUUM')
self.serverDatabase.commit()
        self.log.info('loaded {count} games from database', count=len(self.games))
def startFactory(self):
self.card_database = CardDatabaseManager()
self.card_database.loadPath(self.database_file)
self.card_database.loadCards()
        if len(self.card_database.getBlackCards()) < self.black_cards:
self.black_cards = len(self.card_database.getBlackCards())
self.log.info('database contains only {log_source.black_cards!r} black cards, reduced command-line argument to this amount')
elif self.black_cards == -1:
self.black_cards = len(self.card_database.getBlackCards())
self.log.info("Loaded card database")
self.openServerDatabase()
self.loadGames()
# after doing all the startup stuff
self.log.info("Server up and running, waiting for incoming connections")
def stopFactory(self):
self.log.info('saving games...')
cursor = self.serverDatabase.cursor()
c = 0
for game in self.games:
if not game.open:
data = game.pack()
cursor.execute('INSERT INTO games ('+','.join(data.keys())+') VALUES ('+('?,'*len(data.keys()))[:-1]+')', tuple(data.values()))
c += 1
if c > 0:
self.serverDatabase.commit()
            self.log.info('saved {count} games into database', count=c)
def createGame(self, name, password = None, rounds = None):
game = Game.create(self, name = name, password_hash = password, rounds = rounds)
self.games.append(game)
return game
def unlinkGame(self, game):
del self.games[self.games.index(game)]
def findGame(self, id):
possible_games = [g for g in self.games if g.id == id]
if len(possible_games) != 1:
return None
return possible_games[0]
def findUser(self, id):
possible_users = [u for u in self.users if u.id == id]
        if len(possible_users) != 1:
return None
return possible_users[0]
def getAllUsers(self):
return self.users
def getAllGames(self):
return self.games
def gameExists(self, name):
for game in self.games:
if game.name == name:
return True
return False
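# Sketch of wiring the factory into a Twisted reactor (the port number is
# illustrative only):
#
#   from twisted.internet import reactor
#   reactor.listenTCP(11337, ServerFactory(black_cards=-1,
#                                          database_file='cards.db'))
#   reactor.run()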
| {
"content_hash": "72ceeb9bdb5743c65b82dd5dbdb87712",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 273,
"avg_line_length": 33.52542372881356,
"alnum_prop": 0.6799797775530839,
"repo_name": "Timtam/cards-against-humanity",
"id": "2ff7ea41f0b5345b8e6aaf04b853cf37eddeded5",
"size": "3956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "296335"
}
],
"symlink_target": ""
} |
import abc
import subprocess
import jinja2
import tempfile
import datetime
import logging
import typing
import os
import utils
import browsers
def GetTemplateFileForBrowser(browser_driver: browsers.BrowserDriver,
template_file: str) -> str:
if browser_driver.name == "safari":
return f"safari_{template_file}"
else:
return template_file
class ScenarioOSADriver(abc.ABC):
"""Base Class encapsulating OSA script driving a scenario, with setup and tear
down.
"""
def __init__(self, scenario_name, duration: datetime.timedelta):
self.name = scenario_name
self.script_process = None
self.osa_script = None
self.duration = duration
def Launch(self):
"""Starts the driver script.
"""
assert self.osa_script is not None
logging.debug(f"Starting scenario {self.name}")
self.script_process = subprocess.Popen(['osascript', self.osa_script.name])
def Wait(self):
"""Waits for the script to complete.
"""
assert self.script_process is not None, "Driver wasn't launched."
logging.debug(f"Waiting for scenario {self.name}")
self.script_process.wait()
def TearDown(self):
"""Terminates the script if currently running and ensures related processes
are cleaned up.
"""
logging.debug(f"Tearing down scenario {self.name}")
if self.script_process:
utils.TerminateProcess(self.script_process)
self.osa_script.close()
@abc.abstractmethod
def Summary(self):
"""Returns a dictionary describing the scenarios parameters.
"""
pass
def IsRunning(self) -> bool:
"""Returns true if the script is currently running.
"""
return self.script_process.poll() is None
def _CompileTemplate(self, template_file: str, extra_args: typing.Dict):
"""Compiles script `template_file`, feeding `extra_args` into a temporary
file.
"""
loader = jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), "driver_scripts_templates"))
env = jinja2.Environment(loader=loader)
template = env.get_template(template_file)
self.osa_script = tempfile.NamedTemporaryFile('w+t')
self.osa_script.write(template.render(**extra_args))
self.osa_script.flush()
self._args = extra_args
def Summary(self):
"""Returns a dictionary describing the scenarios parameters.
"""
return {'name': self.name, **self._args}
class ScenarioWithBrowserOSADriver(ScenarioOSADriver):
"""Specialisation for OSA script that runs with a browser.
"""
def __init__(self, scenario_name, browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta):
super().__init__(f"{browser_driver.name}_{scenario_name}", duration)
self.browser = browser_driver
def Launch(self):
self.browser.Launch()
super().Launch()
def TearDown(self):
super().TearDown()
self.browser.TearDown()
def Summary(self):
"""Returns a dictionary describing the scenarios parameters.
"""
return {**super().Summary(), 'browser': self.browser.Summary()}
def _CompileTemplate(self, template_file, extra_args: typing.Dict):
return super()._CompileTemplate(template_file, {
"browser": self.browser.process_name,
**extra_args
})
class IdleScenario(ScenarioOSADriver):
"""Scenario that lets the system idle.
"""
def __init__(self, duration: datetime.timedelta, scenario_name="idle"):
super().__init__(scenario_name, duration)
self._CompileTemplate("idle", {
"delay": duration.total_seconds(),
})
class IdleOnSiteScenario(ScenarioWithBrowserOSADriver):
"""Scenario that lets a browser idle on a web page.
"""
def __init__(self, browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta, site_url: str, scenario_name):
super().__init__(scenario_name, browser_driver, duration)
self._CompileTemplate(
GetTemplateFileForBrowser(browser_driver, "idle_on_site"), {
"idle_site": site_url,
"delay": duration.total_seconds(),
})
@staticmethod
def Wiki(browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta):
return IdleOnSiteScenario(browser_driver, duration,
"http://www.wikipedia.com/wiki/Alessandro_Volta",
"idle_on_wiki")
@staticmethod
def Youtube(browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta):
return IdleOnSiteScenario(
browser_driver, duration,
"https://www.youtube.com/watch?v=9EE_ICC_wFw?autoplay=1",
"idle_on_youtube")
class ZeroWindowScenario(ScenarioWithBrowserOSADriver):
"""Scenario that lets a browser idle with no window.
"""
def __init__(self,
browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta,
scenario_name="zero_window"):
super().__init__(scenario_name, browser_driver, duration)
self._CompileTemplate(
GetTemplateFileForBrowser(browser_driver, "zero_window"), {
"delay": duration.total_seconds(),
})
class NavigationScenario(ScenarioWithBrowserOSADriver):
"""Scenario that has a browser navigating on web pages in a loop.
"""
NAVIGATED_SITES = [
"https://amazon.com",
"https://www.amazon.com/s?k=computer&ref=nb_sb_noss_2",
"https://google.com", "https://www.google.com/search?q=computers",
"https://www.youtube.com",
"https://www.youtube.com/results?search_query=computers",
"https://docs.google.com/document/d/1Ll-8Nvo6JlhzKEttst8GHWCc7_A8Hluy2fX99cy4Sfg/edit?usp=sharing"
]
def __init__(self,
browser_driver: browsers.BrowserDriver,
navigation_duration: datetime.timedelta,
navigation_cycles: int,
sites=NAVIGATED_SITES,
scenario_name="navigation"):
super().__init__(scenario_name, browser_driver,
navigation_duration * navigation_cycles * len(sites))
self._CompileTemplate(
GetTemplateFileForBrowser(browser_driver, "navigation"), {
"per_navigation_delay": navigation_duration.total_seconds(),
"navigation_cycles": navigation_cycles,
"sites": ",".join([f'"{site}"' for site in sites])
})
class MeetScenario(ScenarioWithBrowserOSADriver):
"""Scenario that has the browser join a Google Meet room.
"""
def __init__(self,
browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta,
meeting_id: int,
scenario_name="meet"):
super().__init__(scenario_name, browser_driver, duration)
self._CompileTemplate(GetTemplateFileForBrowser(browser_driver, "meet"), {
"delay": duration.total_seconds(),
"meeting_id": meeting_id
})
def MakeScenarioDriver(scenario_name,
browser_driver: browsers.BrowserDriver,
meet_meeting_id=None) -> ScenarioOSADriver:
"""Creates scenario driver by name.
Args:
scenario_name: Identifier for the scenario to create. Supported scenarios
are: meet, idle_on_wiki, idle_on_youtube, navigation, zero_window and
idle.
browser_driver: Browser the scenario is created with.
meet_meeting_id: Optional meeting id used for meet scenario.
"""
if "idle" == scenario_name:
return IdleScenario(datetime.timedelta(minutes=60))
if not browser_driver:
return None
if "meet" == scenario_name:
return MeetScenario(browser_driver,
datetime.timedelta(minutes=60),
meeting_id=meet_meeting_id)
if "idle_on_wiki" == scenario_name:
return IdleOnSiteScenario.Wiki(browser_driver,
datetime.timedelta(minutes=60))
if "idle_on_youtube" == scenario_name:
return IdleOnSiteScenario.Youtube(browser_driver,
datetime.timedelta(minutes=60))
if "navigation" == scenario_name:
return NavigationScenario(
browser_driver,
navigation_duration=datetime.timedelta(seconds=15),
navigation_cycles=70)
if "zero_window" == scenario_name:
return ZeroWindowScenario(browser_driver, datetime.timedelta(minutes=60))
return None
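# Sketch of driving a scenario end to end (how `browser_driver` is obtained is
# outside this module and assumed here):
#
#   scenario = MakeScenarioDriver('idle_on_wiki', browser_driver)
#   if scenario is not None:
#       scenario.Launch()
#       scenario.Wait()
#       scenario.TearDown()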
| {
"content_hash": "7599f419783acfcec61d0e885361b314",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 104,
"avg_line_length": 33.54435483870968,
"alnum_prop": 0.6538045438153625,
"repo_name": "ric2b/Vivaldi-browser",
"id": "0b9f95f3acb203b06278fef3c60578365192b83b",
"size": "8482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromium/tools/mac/power/scenarios.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import httplib
from flask import request
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_logged_in
from website.addons.s3 import utils
from website.project.decorators import must_have_addon
from website.project.decorators import must_have_permission
from website.project.decorators import must_not_be_registration
@must_be_logged_in
def s3_post_user_settings(auth, **kwargs):
user_addon = auth.user.get_or_add_addon('s3')
try:
access_key = request.json['access_key']
secret_key = request.json['secret_key']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
if not utils.can_list(access_key, secret_key):
return {
'message': ('Unable to list buckets.\n'
                        'Listing buckets is a required permission that can be changed via IAM')
}, httplib.BAD_REQUEST
user_addon.access_key = access_key
user_addon.secret_key = secret_key
user_addon.save()
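# Illustrative request body for the view above (key values are placeholders):
#
#   {"access_key": "AKIA...", "secret_key": "..."}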
@must_have_permission('write')
@must_have_addon('s3', 'node')
def s3_authorize_node(auth, node_addon, **kwargs):
try:
access_key = request.json['access_key']
secret_key = request.json['secret_key']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
if not utils.can_list(access_key, secret_key):
return {
'message': ('Unable to list buckets.\n'
                        'Listing buckets is a required permission that can be changed via IAM')
}, httplib.BAD_REQUEST
user_addon = auth.user.get_or_add_addon('s3')
user_addon.access_key = access_key
user_addon.secret_key = secret_key
user_addon.save()
node_addon.authorize(user_addon, save=True)
return node_addon.to_json(auth.user)
@must_be_logged_in
@must_have_permission('write')
@must_have_addon('s3', 'node')
@must_have_addon('s3', 'user')
def s3_node_import_auth(auth, node_addon, user_addon, **kwargs):
node_addon.authorize(user_addon, save=True)
return node_addon.to_json(auth.user)
@must_have_permission('write')
@must_have_addon('s3', 'user')
@must_have_addon('s3', 'node')
@must_not_be_registration
def s3_post_node_settings(node, auth, user_addon, node_addon, **kwargs):
# Fail if user settings not authorized
if not user_addon.has_auth:
raise HTTPError(httplib.BAD_REQUEST)
# If authorized, only owner can change settings
if node_addon.has_auth and node_addon.user_settings.owner != auth.user:
raise HTTPError(httplib.BAD_REQUEST)
# Claiming the node settings
if not node_addon.user_settings:
node_addon.user_settings = user_addon
bucket = request.json.get('s3_bucket', '')
if not utils.bucket_exists(user_addon.access_key, user_addon.secret_key, bucket):
error_message = ('We are having trouble connecting to that bucket. '
'Try a different one.')
return {'message': error_message}, httplib.BAD_REQUEST
if bucket != node_addon.bucket:
# Update node settings
node_addon.bucket = bucket
node_addon.save()
node.add_log(
action='s3_bucket_linked',
params={
'node': node._id,
'project': node.parent_id,
'bucket': node_addon.bucket,
},
auth=auth,
)
return node_addon.to_json(auth.user)
@must_be_logged_in
@must_have_addon('s3', 'node')
@must_have_permission('write')
@must_not_be_registration
def s3_get_node_settings(auth, node_addon, **kwargs):
result = node_addon.to_json(auth.user)
result['urls'] = utils.serialize_urls(node_addon, auth.user)
return {'result': result}
@must_be_logged_in
@must_have_addon('s3', 'node')
@must_have_addon('s3', 'user')
@must_have_permission('write')
@must_not_be_registration
def s3_get_bucket_list(auth, node_addon, user_addon, **kwargs):
return {
'buckets': utils.get_bucket_names(user_addon)
}
@must_have_permission('write')
@must_have_addon('s3', 'node')
@must_not_be_registration
def s3_delete_node_settings(auth, node_addon, **kwargs):
node_addon.deauthorize(auth=auth, save=True)
return node_addon.to_json(auth.user)
@must_be_logged_in
@must_have_addon('s3', 'user')
def s3_delete_user_settings(user_addon, auth, **kwargs):
user_addon.revoke_auth(auth=auth, save=True)
| {
"content_hash": "afbe4073a5cd5275e7ade156f000027d",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 85,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.6583850931677019,
"repo_name": "bdyetton/prettychart",
"id": "003e489a58654d3429eb7da3d9ed4c6a3fe897e2",
"size": "4347",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature/pretty_charts",
"path": "website/addons/s3/views/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "111251"
},
{
"name": "HTML",
"bytes": "32337"
},
{
"name": "JavaScript",
"bytes": "1100414"
},
{
"name": "Mako",
"bytes": "518865"
},
{
"name": "Python",
"bytes": "3051924"
},
{
"name": "Shell",
"bytes": "1735"
}
],
"symlink_target": ""
} |
"""The spm module provides basic functions for interfacing with matlab
and spm to access spm tools.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
# Standard library imports
import os
from glob import glob
# Third-party imports
import numpy as np
import scipy.io as sio
# Local imports
from nipype.interfaces.base import (Bunch, traits, TraitedSpec, File, Directory,
OutputMultiPath, InputMultiPath, isdefined)
from nipype.interfaces.spm.base import (SPMCommand, SPMCommandInputSpec,
scans_for_fnames)
from nipype.utils.filemanip import (filename_to_list, list_to_filename,
split_filename)
from ... import logging
logger = logging.getLogger('interface')
class Level1DesignInputSpec(SPMCommandInputSpec):
spm_mat_dir = Directory(exists=True, field='dir', desc='directory to store SPM.mat file (opt)')
timing_units = traits.Enum('secs', 'scans', field='timing.units',
desc='units for specification of onsets',
mandatory=True)
interscan_interval = traits.Float(field='timing.RT',
desc='Interscan interval in secs',
mandatory=True)
microtime_resolution = traits.Int(field='timing.fmri_t',
desc='Number of time-bins per scan in secs (opt)')
microtime_onset = traits.Float(field='timing.fmri_t0',
desc='The onset/time-bin in seconds for alignment (opt)')
session_info = traits.Any(field='sess',
desc='Session specific information generated by ``modelgen.SpecifyModel``',
mandatory=True)
factor_info = traits.List(traits.Dict(traits.Enum('name', 'levels')),
field='fact', desc='Factor specific information file (opt)')
bases = traits.Dict(traits.Enum('hrf', 'fourier', 'fourier_han',
'gamma', 'fir'), field='bases', desc="""
dict {'name':{'basesparam1':val,...}}
name : string
Name of basis function (hrf, fourier, fourier_han,
gamma, fir)
hrf :
derivs : 2-element list
Model HRF Derivatives. No derivatives: [0,0],
Time derivatives : [1,0], Time and Dispersion
derivatives: [1,1]
fourier, fourier_han, gamma, fir:
length : int
Post-stimulus window length (in seconds)
order : int
Number of basis functions
""", mandatory=True)
volterra_expansion_order = traits.Enum(1, 2, field='volt',
desc='Model interactions - yes:1, no:2 (opt)')
global_intensity_normalization = traits.Enum('none', 'scaling', field='global',
desc='Global intensity normalization - scaling or none (opt)')
mask_image = File(exists=True, field='mask',
desc='Image for explicitly masking the analysis (opt)')
mask_threshold = traits.Either(traits.Enum('-Inf'), traits.Float(),
desc="Thresholding for the mask (opt, '-Inf')", default='-Inf', usedefault=True)
model_serial_correlations = traits.Enum('AR(1)', 'none', field='cvi',
desc='Model serial correlations AR(1) or none (opt)')
class Level1DesignOutputSpec(TraitedSpec):
spm_mat_file = File(exists=True, desc='SPM mat file')
class Level1Design(SPMCommand):
"""Generate an SPM design matrix
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=61
Examples
--------
>>> level1design = Level1Design()
>>> level1design.inputs.timing_units = 'secs'
>>> level1design.inputs.interscan_interval = 2.5
>>> level1design.inputs.bases = {'hrf':{'derivs': [0,0]}}
>>> level1design.inputs.session_info = 'session_info.npz'
>>> level1design.run() # doctest: +SKIP
"""
input_spec = Level1DesignInputSpec
output_spec = Level1DesignOutputSpec
_jobtype = 'stats'
_jobname = 'fmri_spec'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['spm_mat_dir', 'mask_image']:
return np.array([str(val)], dtype=object)
if opt in ['session_info']: #, 'factor_info']:
if isinstance(val, dict):
return [val]
else:
return val
return val
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
        einputs = super(Level1Design, self)._parse_inputs(skip=('mask_threshold',))
for sessinfo in einputs[0]['sess']:
sessinfo['scans'] = scans_for_fnames(filename_to_list(sessinfo['scans']), keep4d=False)
if not isdefined(self.inputs.spm_mat_dir):
einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object)
return einputs
def _make_matlab_command(self, content):
"""validates spm options and generates job structure
if mfile is True uses matlab .m file
else generates a job structure and saves in .mat
"""
if isdefined(self.inputs.mask_image):
# SPM doesn't handle explicit masking properly, especially
# when you want to use the entire mask image
postscript = "load SPM;\n"
postscript += "SPM.xM.VM = spm_vol('%s');\n" % list_to_filename(self.inputs.mask_image)
postscript += "SPM.xM.I = 0;\n"
postscript += "SPM.xM.T = [];\n"
postscript += "SPM.xM.TH = ones(size(SPM.xM.TH))*(%s);\n" % self.inputs.mask_threshold
postscript += "SPM.xM.xs = struct('Masking', 'explicit masking only');\n"
postscript += "save SPM SPM;\n"
else:
postscript = None
return super(Level1Design, self)._make_matlab_command(content, postscript=postscript)
def _list_outputs(self):
outputs = self._outputs().get()
spm = os.path.join(os.getcwd(), 'SPM.mat')
outputs['spm_mat_file'] = spm
return outputs
class EstimateModelInputSpec(SPMCommandInputSpec):
spm_mat_file = File(exists=True, field='spmmat', desc='absolute path to SPM.mat',
copyfile=True,
mandatory=True)
estimation_method = traits.Dict(traits.Enum('Classical', 'Bayesian2', 'Bayesian'),
field='method',
desc='Classical, Bayesian2, Bayesian (dict)',
mandatory=True)
    flags = traits.Dict(desc='optional arguments (opt)')  # a dict, so _parse_inputs can merge it via update()
class EstimateModelOutputSpec(TraitedSpec):
mask_image = File(exists=True, desc='binary mask to constrain estimation')
beta_images = OutputMultiPath(File(exists=True), desc='design parameter estimates')
residual_image = File(exists=True, desc='Mean-squared image of the residuals')
RPVimage = File(exists=True, desc='Resels per voxel image')
spm_mat_file = File(exist=True, desc='Updated SPM mat file')
class EstimateModel(SPMCommand):
"""Use spm_spm to estimate the parameters of a model
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=71
Examples
--------
>>> est = EstimateModel()
>>> est.inputs.spm_mat_file = 'SPM.mat'
>>> est.run() # doctest: +SKIP
"""
input_spec = EstimateModelInputSpec
output_spec = EstimateModelOutputSpec
_jobtype = 'stats'
_jobname = 'fmri_est'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'spm_mat_file':
return np.array([str(val)], dtype=object)
if opt == 'estimation_method':
if isinstance(val, str):
return {'%s' % val: 1}
else:
return val
return val
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(EstimateModel, self)._parse_inputs(skip=('flags'))
if isdefined(self.inputs.flags):
einputs[0].update(self.inputs.flags)
return einputs
def _list_outputs(self):
outputs = self._outputs().get()
pth, _ = os.path.split(self.inputs.spm_mat_file)
mask = os.path.join(pth, 'mask.img')
outputs['mask_image'] = mask
spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
betas = []
for vbeta in spm['SPM'][0, 0].Vbeta[0]:
betas.append(str(os.path.join(pth, vbeta.fname[0])))
if betas:
outputs['beta_images'] = betas
resms = os.path.join(pth, 'ResMS.img')
outputs['residual_image'] = resms
rpv = os.path.join(pth, 'RPV.img')
outputs['RPVimage'] = rpv
spm = os.path.join(pth, 'SPM.mat')
outputs['spm_mat_file'] = spm
return outputs
class EstimateContrastInputSpec(SPMCommandInputSpec):
spm_mat_file = File(exists=True, field='spmmat',
desc='Absolute path to SPM.mat',
copyfile=True,
mandatory=True)
contrasts = traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('F'),
traits.List(traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)))))),
desc="""List of contrasts with each contrast being a list of the form:
[('name', 'stat', [condition list], [weight list], [session list])]. if
session list is None or not provided, all sessions are used. For F
contrasts, the condition list should contain previously defined
T-contrasts.""",
mandatory=True)
beta_images = InputMultiPath(File(exists=True),
desc='Parameter estimates of the design matrix',
copyfile=False,
mandatory=True)
residual_image = File(exists=True, desc='Mean-squared image of the residuals',
copyfile=False,
mandatory=True)
use_derivs = traits.Bool(desc='use derivatives for estimation',
xor=['group_contrast'])
group_contrast = traits.Bool(desc='higher level contrast',
xor=['use_derivs'])
class EstimateContrastOutputSpec(TraitedSpec):
con_images = OutputMultiPath(File(exists=True), desc='contrast images from a t-contrast')
spmT_images = OutputMultiPath(File(exists=True), desc='stat images from a t-contrast')
ess_images = OutputMultiPath(File(exists=True), desc='contrast images from an F-contrast')
spmF_images = OutputMultiPath(File(exists=True), desc='stat images from an F-contrast')
    spm_mat_file = File(exists=True, desc='Updated SPM mat file')
class EstimateContrast(SPMCommand):
"""use spm_contrasts to estimate contrasts of interest
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> est = spm.EstimateContrast()
>>> est.inputs.spm_mat_file = 'SPM.mat'
>>> cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5])
>>> cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1])
>>> contrasts = [cont1,cont2]
>>> est.inputs.contrasts = contrasts
>>> est.run() # doctest: +SKIP
"""
input_spec = EstimateContrastInputSpec
output_spec = EstimateContrastOutputSpec
_jobtype = 'stats'
_jobname = 'con'
def _make_matlab_command(self, _):
"""validates spm options and generates job structure
"""
contrasts = []
cname = []
for i, cont in enumerate(self.inputs.contrasts):
cname.insert(i, cont[0])
contrasts.insert(i, Bunch(name=cont[0],
stat=cont[1],
conditions=cont[2],
weights=None,
sessions=None))
if len(cont) >= 4:
contrasts[i].weights = cont[3]
if len(cont) >= 5:
contrasts[i].sessions = cont[4]
script = "% generated by nipype.interfaces.spm\n"
script += "spm_defaults;\n"
script += "jobs{1}.stats{1}.con.spmmat = {'%s'};\n" % self.inputs.spm_mat_file
script += "load(jobs{1}.stats{1}.con.spmmat{:});\n"
script += "SPM.swd = '%s';\n" % os.getcwd()
script += "save(jobs{1}.stats{1}.con.spmmat{:},'SPM');\n"
script += "names = SPM.xX.name;\n"
# get names for columns
if isdefined(self.inputs.group_contrast) and self.inputs.group_contrast:
script += "condnames=names;\n"
else:
if self.inputs.use_derivs:
script += "pat = 'Sn\([0-9*]\) (.*)';\n"
else:
script += "pat = 'Sn\([0-9*]\) (.*)\*bf\(1\)|Sn\([0-9*]\) .*\*bf\([2-9]\)|Sn\([0-9*]\) (.*)';\n"
script += "t = regexp(names,pat,'tokens');\n"
# get sessidx for columns
script += "pat1 = 'Sn\(([0-9].*)\)\s.*';\n"
script += "t1 = regexp(names,pat1,'tokens');\n"
script += "for i0=1:numel(t),condnames{i0}='';condsess(i0)=0;if ~isempty(t{i0}{1}),condnames{i0} = t{i0}{1}{1};condsess(i0)=str2num(t1{i0}{1}{1});end;end;\n"
# BUILD CONTRAST SESSION STRUCTURE
for i, contrast in enumerate(contrasts):
if contrast.stat == 'T':
script += "consess{%d}.tcon.name = '%s';\n" % (i + 1, contrast.name)
script += "consess{%d}.tcon.convec = zeros(1,numel(names));\n" % (i + 1)
for c0, cond in enumerate(contrast.conditions):
script += "idx = strmatch('%s',condnames,'exact');\n" % (cond)
script += "if isempty(idx), throw(MException('CondName:Chk', sprintf('Condition %%s not found in design','%s'))); end;\n" % cond
if contrast.sessions:
for sno, sw in enumerate(contrast.sessions):
script += "sidx = find(condsess(idx)==%d);\n" % (sno + 1)
script += "consess{%d}.tcon.convec(idx(sidx)) = %f;\n" % (i + 1, sw * contrast.weights[c0])
else:
script += "consess{%d}.tcon.convec(idx) = %f;\n" % (i + 1, contrast.weights[c0])
for i, contrast in enumerate(contrasts):
if contrast.stat == 'F':
script += "consess{%d}.fcon.name = '%s';\n" % (i + 1, contrast.name)
for cl0, fcont in enumerate(contrast.conditions):
try:
tidx = cname.index(fcont[0])
                    except ValueError:
                        raise Exception("Contrast Estimate: could not get index of"
                                        " T contrast; it was probably not defined"
                                        " prior to the F contrasts")
script += "consess{%d}.fcon.convec{%d} = consess{%d}.tcon.convec;\n" % (i + 1, cl0 + 1, tidx + 1)
script += "jobs{1}.stats{1}.con.consess = consess;\n"
script += "if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');jobs=spm_jobman('spm5tospm8',{jobs});end\n"
script += "spm_jobman('run',jobs);"
return script
def _list_outputs(self):
outputs = self._outputs().get()
pth, _ = os.path.split(self.inputs.spm_mat_file)
spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
con_images = []
spmT_images = []
for con in spm['SPM'][0, 0].xCon[0]:
con_images.append(str(os.path.join(pth, con.Vcon[0, 0].fname[0])))
spmT_images.append(str(os.path.join(pth, con.Vspm[0, 0].fname[0])))
if con_images:
outputs['con_images'] = con_images
outputs['spmT_images'] = spmT_images
ess = glob(os.path.join(pth, 'ess*.img'))
if len(ess) > 0:
outputs['ess_images'] = sorted(ess)
spmf = glob(os.path.join(pth, 'spmF*.img'))
if len(spmf) > 0:
outputs['spmF_images'] = sorted(spmf)
outputs['spm_mat_file'] = self.inputs.spm_mat_file
return outputs
class ThresholdInputSpec(SPMCommandInputSpec):
spm_mat_file = File(exists=True, desc='absolute path to SPM.mat', copyfile=True, mandatory=True)
stat_image = File(exists=True, desc='stat image', copyfile=False, mandatory=True)
contrast_index = traits.Int(mandatory=True, desc='which contrast in the SPM.mat to use')
use_fwe_correction = traits.Bool(True, usedefault=True, desc="whether to use FWE (Bonferroni) correction for initial threshold (height_threshold_type has to be set to p-value)")
use_topo_fdr = traits.Bool(True, usedefault=True, desc="whether to use FDR over cluster extent probabilities")
height_threshold = traits.Float(0.05, usedefault=True, desc="value for initial thresholding (defining clusters)")
height_threshold_type = traits.Enum('p-value', 'stat', usedefault=True, desc="Is the cluster forming threshold a stat value or p-value?")
extent_fdr_p_threshold = traits.Float(0.05, usedefault=True, desc='p threshold on FDR corrected cluster size probabilities')
extent_threshold = traits.Int(0, usedefault=True, desc="Minimum cluster size in voxels")
    force_activation = traits.Bool(False, usedefault=True, desc="If no clusters survive the topological inference step, this will pick the cluster with the highest sum of t-values. Use with care.")
class ThresholdOutputSpec(TraitedSpec):
thresholded_map = File(exists=True)
n_clusters = traits.Int()
pre_topo_fdr_map = File(exists=True)
pre_topo_n_clusters = traits.Int()
activation_forced = traits.Bool()
cluster_forming_thr = traits.Float()
class Threshold(SPMCommand):
'''Topological FDR thresholding based on cluster extent/size. Smoothness is
estimated from GLM residuals but is assumed to be the same for all of the
voxels.
Examples
--------
>>> thresh = Threshold()
>>> thresh.inputs.spm_mat_file = 'SPM.mat'
>>> thresh.inputs.stat_image = 'spmT_0001.img'
>>> thresh.inputs.contrast_index = 1
>>> thresh.inputs.extent_fdr_p_threshold = 0.05
>>> thresh.run() # doctest: +SKIP
'''
input_spec = ThresholdInputSpec
output_spec = ThresholdOutputSpec
def _gen_thresholded_map_filename(self):
_, fname, ext = split_filename(self.inputs.stat_image)
return os.path.abspath(fname + "_thr" + ext)
def _gen_pre_topo_map_filename(self):
_, fname, ext = split_filename(self.inputs.stat_image)
return os.path.abspath(fname + "_pre_topo_thr" + ext)
def _make_matlab_command(self, _):
script = "con_index = %d;\n" % self.inputs.contrast_index
script += "cluster_forming_thr = %f;\n" % self.inputs.height_threshold
if self.inputs.use_fwe_correction:
script += "thresDesc = 'FWE';\n"
else:
script += "thresDesc = 'none';\n"
if self.inputs.use_topo_fdr:
script += "use_topo_fdr = 1;\n"
else:
script += "use_topo_fdr = 0;\n"
if self.inputs.force_activation:
script += "force_activation = 1;\n"
else:
script += "force_activation = 0;\n"
script += "cluster_extent_p_fdr_thr = %f;\n" % self.inputs.extent_fdr_p_threshold
script += "stat_filename = '%s';\n" % self.inputs.stat_image
script += "height_threshold_type = '%s';\n" % self.inputs.height_threshold_type
script += "extent_threshold = %d;\n" % self.inputs.extent_threshold
script += "load %s;\n" % self.inputs.spm_mat_file
script += """
FWHM = SPM.xVol.FWHM;
df = [SPM.xCon(con_index).eidf SPM.xX.erdf];
STAT = SPM.xCon(con_index).STAT;
R = SPM.xVol.R;
S = SPM.xVol.S;
n = 1;
switch thresDesc
case 'FWE'
cluster_forming_thr = spm_uc(cluster_forming_thr,df,STAT,R,n,S);
case 'none'
if strcmp(height_threshold_type, 'p-value')
cluster_forming_thr = spm_u(cluster_forming_thr^(1/n),df,STAT);
end
end
stat_map_vol = spm_vol(stat_filename);
[stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol);
Z = stat_map_data(:)';
[x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))');
XYZ = cat(1, x', y', z');
XYZth = XYZ(:, Z >= cluster_forming_thr);
Zth = Z(Z >= cluster_forming_thr);
"""
script += "spm_write_filtered(Zth,XYZth,stat_map_vol.dim',stat_map_vol.mat,'thresholded map', '%s');\n" % self._gen_pre_topo_map_filename()
script += """
max_size = 0;
max_size_index = 0;
th_nclusters = 0;
nclusters = 0;
if isempty(XYZth)
thresholded_XYZ = [];
thresholded_Z = [];
else
if use_topo_fdr
V2R = 1/prod(FWHM(stat_map_vol.dim > 1));
[uc,Pc,ue] = spm_uc_clusterFDR(cluster_extent_p_fdr_thr,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr);
end
voxel_labels = spm_clusters(XYZth);
nclusters = max(voxel_labels);
thresholded_XYZ = [];
thresholded_Z = [];
for i = 1:nclusters
cluster_size = sum(voxel_labels==i);
if cluster_size > extent_threshold && (~use_topo_fdr || (cluster_size - uc) > -1)
thresholded_XYZ = cat(2, thresholded_XYZ, XYZth(:,voxel_labels == i));
thresholded_Z = cat(2, thresholded_Z, Zth(voxel_labels == i));
th_nclusters = th_nclusters + 1;
end
if force_activation
cluster_sum = sum(Zth(voxel_labels == i));
if cluster_sum > max_size
max_size = cluster_sum;
max_size_index = i;
end
end
end
end
activation_forced = 0;
if isempty(thresholded_XYZ)
if force_activation && max_size ~= 0
thresholded_XYZ = XYZth(:,voxel_labels == max_size_index);
thresholded_Z = Zth(voxel_labels == max_size_index);
th_nclusters = 1;
activation_forced = 1;
else
thresholded_Z = [0];
thresholded_XYZ = [1 1 1]';
th_nclusters = 0;
end
end
fprintf('activation_forced = %d\\n',activation_forced);
fprintf('pre_topo_n_clusters = %d\\n',nclusters);
fprintf('n_clusters = %d\\n',th_nclusters);
fprintf('cluster_forming_thr = %f\\n',cluster_forming_thr);
"""
script += "spm_write_filtered(thresholded_Z,thresholded_XYZ,stat_map_vol.dim',stat_map_vol.mat,'thresholded map', '%s');\n" % self._gen_thresholded_map_filename()
return script
def aggregate_outputs(self, runtime=None):
outputs = self._outputs()
setattr(outputs, 'thresholded_map', self._gen_thresholded_map_filename())
setattr(outputs, 'pre_topo_fdr_map', self._gen_pre_topo_map_filename())
for line in runtime.stdout.split('\n'):
if line.startswith("activation_forced = "):
setattr(outputs, 'activation_forced', line[len("activation_forced = "):].strip() == "1")
elif line.startswith("n_clusters = "):
setattr(outputs, 'n_clusters', int(line[len("n_clusters = "):].strip()))
elif line.startswith("pre_topo_n_clusters = "):
setattr(outputs, 'pre_topo_n_clusters', int(line[len("pre_topo_n_clusters = "):].strip()))
elif line.startswith("cluster_forming_thr = "):
setattr(outputs, 'cluster_forming_thr', float(line[len("cluster_forming_thr = "):].strip()))
return outputs
def _list_outputs(self):
outputs = self._outputs().get()
outputs['thresholded_map'] = self._gen_thresholded_map_filename()
outputs['pre_topo_fdr_map'] = self._gen_pre_topo_map_filename()
return outputs
class ThresholdStatisticsInputSpec(SPMCommandInputSpec):
spm_mat_file = File(exists=True, desc='absolute path to SPM.mat', copyfile=True, mandatory=True)
stat_image = File(exists=True, desc='stat image', copyfile=False, mandatory=True)
contrast_index = traits.Int(mandatory=True, desc='which contrast in the SPM.mat to use')
height_threshold = traits.Float(desc="stat value for initial thresholding (defining clusters)", mandatory=True)
extent_threshold = traits.Int(0, usedefault=True, desc="Minimum cluster size in voxels")
class ThresholdStatisticsOutputSpec(TraitedSpec):
voxelwise_P_Bonf = traits.Float()
voxelwise_P_RF = traits.Float()
voxelwise_P_uncor = traits.Float()
voxelwise_P_FDR = traits.Float()
clusterwise_P_RF = traits.Float()
clusterwise_P_FDR = traits.Float()
class ThresholdStatistics(SPMCommand):
    '''Given height and cluster size thresholds, calculate theoretical
    probabilities concerning false positives
Examples
--------
>>> thresh = ThresholdStatistics()
>>> thresh.inputs.spm_mat_file = 'SPM.mat'
>>> thresh.inputs.stat_image = 'spmT_0001.img'
>>> thresh.inputs.contrast_index = 1
>>> thresh.inputs.height_threshold = 4.56
>>> thresh.run() # doctest: +SKIP
'''
input_spec = ThresholdStatisticsInputSpec
output_spec = ThresholdStatisticsOutputSpec
def _make_matlab_command(self, _):
script = "con_index = %d;\n" % self.inputs.contrast_index
script += "cluster_forming_thr = %f;\n" % self.inputs.height_threshold
script += "stat_filename = '%s';\n" % self.inputs.stat_image
script += "extent_threshold = %d;\n" % self.inputs.extent_threshold
script += "load '%s'\n" % self.inputs.spm_mat_file
script += """
FWHM = SPM.xVol.FWHM;
df = [SPM.xCon(con_index).eidf SPM.xX.erdf];
STAT = SPM.xCon(con_index).STAT;
R = SPM.xVol.R;
S = SPM.xVol.S;
n = 1;
voxelwise_P_Bonf = spm_P_Bonf(cluster_forming_thr,df,STAT,S,n)
voxelwise_P_RF = spm_P_RF(1,0,cluster_forming_thr,df,STAT,R,n)
stat_map_vol = spm_vol(stat_filename);
[stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol);
Z = stat_map_data(:);
Zum = Z;
switch STAT
case 'Z'
VPs = (1-spm_Ncdf(Zum)).^n;
voxelwise_P_uncor = (1-spm_Ncdf(cluster_forming_thr)).^n
case 'T'
VPs = (1 - spm_Tcdf(Zum,df(2))).^n;
voxelwise_P_uncor = (1 - spm_Tcdf(cluster_forming_thr,df(2))).^n
case 'X'
VPs = (1-spm_Xcdf(Zum,df(2))).^n;
voxelwise_P_uncor = (1-spm_Xcdf(cluster_forming_thr,df(2))).^n
case 'F'
VPs = (1 - spm_Fcdf(Zum,df)).^n;
voxelwise_P_uncor = (1 - spm_Fcdf(cluster_forming_thr,df)).^n
end
VPs = sort(VPs);
voxelwise_P_FDR = spm_P_FDR(cluster_forming_thr,df,STAT,n,VPs)
V2R = 1/prod(FWHM(stat_map_vol.dim > 1));
clusterwise_P_RF = spm_P_RF(1,extent_threshold*V2R,cluster_forming_thr,df,STAT,R,n)
[x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))');
XYZ = cat(1, x', y', z');
[u, CPs, ue] = spm_uc_clusterFDR(0.05,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr);
clusterwise_P_FDR = spm_P_clusterFDR(extent_threshold*V2R,df,STAT,R,n,cluster_forming_thr,CPs')
"""
return script
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
cur_output = ""
for line in runtime.stdout.split('\n'):
if cur_output != "" and len(line.split()) != 0:
setattr(outputs, cur_output, float(line))
cur_output = ""
continue
if len(line.split()) != 0 and line.split()[0] in ["clusterwise_P_FDR", "clusterwise_P_RF", "voxelwise_P_Bonf", "voxelwise_P_FDR",
"voxelwise_P_RF", "voxelwise_P_uncor"]:
cur_output = line.split()[0]
continue
return outputs
class FactorialDesignInputSpec(SPMCommandInputSpec):
spm_mat_dir = Directory(exists=True, field='dir', desc='directory to store SPM.mat file (opt)')
    # TODO: make an alias of InputMultiPath, since the inputs below are dicts rather than paths
covariates = InputMultiPath(traits.Dict(key_trait=traits.Enum('vector', 'name',
'interaction', 'centering')),
field='cov',
desc='covariate dictionary {vector, name, interaction, centering}')
threshold_mask_none = traits.Bool(field='masking.tm.tm_none',
xor=['threshold_mask_absolute', 'threshold_mask_relative'],
desc='do not use threshold masking')
threshold_mask_absolute = traits.Float(field='masking.tm.tma.athresh',
xor=['threshold_mask_none', 'threshold_mask_relative'],
desc='use an absolute threshold')
threshold_mask_relative = traits.Float(field='masking.tm.tmr.rthresh',
xor=['threshold_mask_absolute', 'threshold_mask_none'],
desc='threshold using a proportion of the global value')
use_implicit_threshold = traits.Bool(field='masking.im',
desc='use implicit mask NaNs or zeros to threshold')
explicit_mask_file = File(field='masking.em', #requires cell
                              desc='use an explicit mask file to threshold')
global_calc_omit = traits.Bool(field='globalc.g_omit',
xor=['global_calc_mean', 'global_calc_values'],
desc='omit global calculation')
global_calc_mean = traits.Bool(field='globalc.g_mean',
xor=['global_calc_omit', 'global_calc_values'],
desc='use mean for global calculation')
global_calc_values = traits.List(traits.Float, field='globalc.g_user.global_uval',
xor=['global_calc_mean', 'global_calc_omit'],
                                     desc='use the given values for global calculation')
no_grand_mean_scaling = traits.Bool(field='globalm.gmsca.gmsca_no',
desc='do not perform grand mean scaling')
global_normalization = traits.Enum(1, 2, 3, field='globalm.glonorm',
desc='global normalization None-1, Proportional-2, ANCOVA-3')
class FactorialDesignOutputSpec(TraitedSpec):
spm_mat_file = File(exists=True, desc='SPM mat file')
class FactorialDesign(SPMCommand):
"""Base class for factorial designs
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=79
"""
input_spec = FactorialDesignInputSpec
output_spec = FactorialDesignOutputSpec
_jobtype = 'stats'
_jobname = 'factorial_design'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['spm_mat_dir', 'explicit_mask_file']:
return np.array([str(val)], dtype=object)
if opt in ['covariates']:
outlist = []
mapping = {'name': 'cname', 'vector': 'c',
                       'interaction': 'iCFI',
'centering': 'iCC'}
for dictitem in val:
outdict = {}
for key, keyval in dictitem.items():
outdict[mapping[key]] = keyval
outlist.append(outdict)
return outlist
return val
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(FactorialDesign, self)._parse_inputs()
if not isdefined(self.inputs.spm_mat_dir):
einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object)
return einputs
def _list_outputs(self):
outputs = self._outputs().get()
spm = os.path.join(os.getcwd(), 'SPM.mat')
outputs['spm_mat_file'] = spm
return outputs
class OneSampleTTestDesignInputSpec(FactorialDesignInputSpec):
in_files = traits.List(File(exists=True), field='des.t1.scans',
mandatory=True, minlen=2,
desc='input files')
class OneSampleTTestDesign(FactorialDesign):
"""Create SPM design for one sample t-test
Examples
--------
>>> ttest = OneSampleTTestDesign()
>>> ttest.inputs.in_files = ['cont1.nii', 'cont2.nii']
>>> ttest.run() # doctest: +SKIP
"""
input_spec = OneSampleTTestDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['in_files']:
return np.array(val, dtype=object)
return super(OneSampleTTestDesign, self)._format_arg(opt, spec, val)
class TwoSampleTTestDesignInputSpec(FactorialDesignInputSpec):
# very unlikely that you will have a single image in one group, so setting
# parameters to require at least two files in each group [SG]
group1_files = traits.List(File(exists=True), field='des.t2.scans1',
mandatory=True, minlen=2,
desc='Group 1 input files')
group2_files = traits.List(File(exists=True), field='des.t2.scans2',
mandatory=True, minlen=2,
desc='Group 2 input files')
dependent = traits.Bool(field='des.t2.dept',
desc='Are the measurements dependent between levels')
unequal_variance = traits.Bool(field='des.t2.variance',
                                   desc='Are the variances unequal between groups')
class TwoSampleTTestDesign(FactorialDesign):
"""Create SPM design for two sample t-test
Examples
--------
>>> ttest = TwoSampleTTestDesign()
>>> ttest.inputs.group1_files = ['cont1.nii', 'cont2.nii']
>>> ttest.inputs.group2_files = ['cont1a.nii', 'cont2a.nii']
>>> ttest.run() # doctest: +SKIP
"""
input_spec = TwoSampleTTestDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['group1_files', 'group2_files']:
return np.array(val, dtype=object)
return super(TwoSampleTTestDesign, self)._format_arg(opt, spec, val)
class PairedTTestDesignInputSpec(FactorialDesignInputSpec):
paired_files = traits.List(traits.List(File(exists=True), minlen=2, maxlen=2),
field='des.pt.pair',
mandatory=True, minlen=2,
desc='List of paired files')
grand_mean_scaling = traits.Bool(field='des.pt.gmsca',
desc='Perform grand mean scaling')
ancova = traits.Bool(field='des.pt.ancova',
desc='Specify ancova-by-factor regressors')
class PairedTTestDesign(FactorialDesign):
"""Create SPM design for paired t-test
Examples
--------
>>> pttest = PairedTTestDesign()
>>> pttest.inputs.paired_files = [['cont1.nii','cont1a.nii'],['cont2.nii','cont2a.nii']]
>>> pttest.run() # doctest: +SKIP
"""
input_spec = PairedTTestDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['paired_files']:
return [dict(scans=np.array(files, dtype=object)) for files in val]
return super(PairedTTestDesign, self)._format_arg(opt, spec, val)
class MultipleRegressionDesignInputSpec(FactorialDesignInputSpec):
in_files = traits.List(File(exists=True),
field='des.mreg.scans',
mandatory=True, minlen=2,
desc='List of files')
include_intercept = traits.Bool(True, field='des.mreg.incint',
usedefault=True,
desc='Include intercept in design')
user_covariates = InputMultiPath(traits.Dict(key_trait=traits.Enum('vector',
'name',
'centering')),
field='des.mreg.mcov',
desc='covariate dictionary {vector, name, centering}')
class MultipleRegressionDesign(FactorialDesign):
"""Create SPM design for multiple regression
Examples
--------
>>> mreg = MultipleRegressionDesign()
>>> mreg.inputs.in_files = ['cont1.nii','cont2.nii']
>>> mreg.run() # doctest: +SKIP
"""
input_spec = MultipleRegressionDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['in_files']:
return np.array(val, dtype=object)
if opt in ['include_intercept']:
return int(val)
if opt in ['user_covariates']:
outlist = []
mapping = {'name': 'cname', 'vector': 'c',
'centering': 'iCC'}
for dictitem in val:
outdict = {}
for key, keyval in dictitem.items():
outdict[mapping[key]] = keyval
outlist.append(outdict)
return outlist
return super(MultipleRegressionDesign, self)._format_arg(opt, spec, val)
| {
"content_hash": "cb9195ca5c6de6996b8aa3d212a2131b",
"timestamp": "",
"source": "github",
"line_count": 906,
"max_line_length": 198,
"avg_line_length": 43.05077262693157,
"alnum_prop": 0.5625833247872013,
"repo_name": "christianbrodbeck/nipype",
"id": "cd7ce07b2eaeb19768249052a8447d0c13f55361",
"size": "39118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/interfaces/spm/model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "282"
},
{
"name": "Objective-C",
"bytes": "4736"
},
{
"name": "Python",
"bytes": "2537426"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
import datetime
import math
import ibis.expr.types as ir
def _set_literal_format(translator, expr):
value_type = expr.type().value_type
formatted = [
translator.translate(ir.literal(x, type=value_type))
for x in expr.op().value
]
return '(' + ', '.join(formatted) + ')'
def _boolean_literal_format(translator, expr):
value = expr.op().value
return 'TRUE' if value else 'FALSE'
def _string_literal_format(translator, expr):
value = expr.op().value
return "'{}'".format(value.replace("'", "\\'"))
def _number_literal_format(translator, expr):
value = expr.op().value
if math.isfinite(value):
formatted = repr(value)
else:
if math.isnan(value):
formatted_val = 'NaN'
elif math.isinf(value):
if value > 0:
formatted_val = 'Infinity'
else:
formatted_val = '-Infinity'
formatted = f"CAST({formatted_val!r} AS DOUBLE)"
return formatted
def _interval_literal_format(translator, expr):
return 'INTERVAL {} {}'.format(
expr.op().value, expr.type().resolution.upper()
)
def _date_literal_format(translator, expr):
value = expr.op().value
if isinstance(value, datetime.date):
value = value.strftime('%Y-%m-%d')
return repr(value)
def _timestamp_literal_format(translator, expr):
value = expr.op().value
if isinstance(value, datetime.datetime):
value = value.strftime('%Y-%m-%d %H:%M:%S')
return repr(value)
literal_formatters = {
'boolean': _boolean_literal_format,
'number': _number_literal_format,
'string': _string_literal_format,
'interval': _interval_literal_format,
'timestamp': _timestamp_literal_format,
'date': _date_literal_format,
'set': _set_literal_format,
}
def literal(translator, expr):
"""Return the expression as its literal value."""
if isinstance(expr, ir.BooleanValue):
typeclass = 'boolean'
elif isinstance(expr, ir.StringValue):
typeclass = 'string'
elif isinstance(expr, ir.NumericValue):
typeclass = 'number'
elif isinstance(expr, ir.DateValue):
typeclass = 'date'
elif isinstance(expr, ir.TimestampValue):
typeclass = 'timestamp'
elif isinstance(expr, ir.IntervalValue):
typeclass = 'interval'
elif isinstance(expr, ir.SetValue):
typeclass = 'set'
else:
        raise NotImplementedError(type(expr))
return literal_formatters[typeclass](translator, expr)
def null_literal(translator, expr):
return 'NULL'
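# Illustrative results of the formatters above (a sketch; `translator` is
# whichever translator instance the backend supplies, `import ibis` and
# `import datetime` are assumed at the call site, and the quoting shown
# follows the functions defined in this module):
#   literal(translator, ibis.literal(True))          -> 'TRUE'
#   literal(translator, ibis.literal("it's"))        -> "'it\'s'"
#   literal(translator, ibis.literal(float('nan')))  -> "CAST('NaN' AS DOUBLE)"
#   literal(translator, ibis.literal(datetime.date(2020, 1, 2)))
#                                                    -> "'2020-01-02'"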
| {
"content_hash": "19da27136ab24e315119494f644b67ea",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 60,
"avg_line_length": 25.323529411764707,
"alnum_prop": 0.6252419667053813,
"repo_name": "cloudera/ibis",
"id": "223c416770e6d6850169c78ffecb1efb12e303a7",
"size": "2583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibis/backends/base/sql/registry/literal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "44943"
},
{
"name": "CMake",
"bytes": "4383"
},
{
"name": "Python",
"bytes": "2570944"
},
{
"name": "Shell",
"bytes": "1989"
}
],
"symlink_target": ""
} |
"""
A custom exception raised by http_request_parser when it
encounters an error
"""
# The exception that http_request_parser raises on error.
class HttpParseException(Exception):
pass
| {
"content_hash": "3ee715d71fdac089c82b413a8d18825b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 70,
"avg_line_length": 27.25,
"alnum_prop": 0.7752293577981652,
"repo_name": "tupini07/StarLord",
"id": "b7072e8cd42d6a63bb2c33c68282588afe7f8907",
"size": "218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protocolo_http/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25766"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import click
from ....cliutils import out
@click.group(help='Brands associated with your account')
def brands():
pass
@brands.command(help='List all brands')
@click.pass_obj
def list(state):
out(state, state.client.brands())
@brands.command(help='Show details about a single brand')
@click.argument('id',
type=int)
@click.pass_obj
def show(state, id):
out(state, state.client.brands(id))
@brands.command(help='List all rules associated with brand')
@click.argument('id', type=int)
@click.pass_obj
def list_rules(state, id):
out(state, state.client.brand_rules(id))
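# Hypothetical invocations, assuming this group is mounted on a `performline`
# entry point (the top-level command path and click's subcommand naming may
# differ by click version):
#   $ performline brands list
#   $ performline brands show 42
#   $ performline brands list_rules 42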
| {
"content_hash": "16dcb5a4618a7240c8d923c94d39a075",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 60,
"avg_line_length": 22.20689655172414,
"alnum_prop": 0.7003105590062112,
"repo_name": "PerformLine/python-performline-client",
"id": "0d30c9fc42b0ff37d2ccac837df08750625a8b52",
"size": "2187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "performline/products/common/cli/brands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2103"
},
{
"name": "Python",
"bytes": "177148"
},
{
"name": "Shell",
"bytes": "413"
}
],
"symlink_target": ""
} |
import logging
import sys
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
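# Importing this module configures the root logger to send DEBUG and above
# to stderr; a usage sketch (the import path is assumed from the repo layout):
#   import ms.site_logging  # noqa: F401
#   import logging
#   logging.getLogger(__name__).debug('appears on stderr')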
| {
"content_hash": "95baea6a591ca460c5b6f49c8cd7a360",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 60,
"avg_line_length": 23.916666666666668,
"alnum_prop": 0.794425087108014,
"repo_name": "brata-hsdc/brata.masterserver",
"id": "7a55b33cec33a2552f007322164efb248a7b557f",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/hsdc2016",
"path": "workspace/ms/ms/site_logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "182"
},
{
"name": "CSS",
"bytes": "67798"
},
{
"name": "HTML",
"bytes": "67747"
},
{
"name": "JavaScript",
"bytes": "396454"
},
{
"name": "PHP",
"bytes": "8473585"
},
{
"name": "Python",
"bytes": "467028"
},
{
"name": "Shell",
"bytes": "12570"
}
],
"symlink_target": ""
} |
"""
Handle database synchronization information.
"""
import urllib
from .jsonfy import jsonify
from sqlalchemy.sql import column, select, alias, join, and_
from sqlalchemy import func
class DBSync(object):
"""
    Sync the database to the server (Ledis).
    - Currently handled by writing records to a JSON file.
    - Later this will be handled via special commands of the form:
PUT type values
"""
def __init__(self, engine):
self._engine = engine
def sync_table(self, tbl_name, tbl_def):
pg_size = 1000
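        # NOTE: only the Auction table is synced for now; this looks like a
        # debugging restriction.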
if tbl_name != 'Auction':
return
pk_names = [key.name for key in tbl_def.primary_key]
pk_cols = [tbl_def.c[key.name] for key in tbl_def.primary_key]
#print tbl_name, pk_names
if len(pk_names) == 0:
#print ("Plz assign primary key in %s." % tbl_name)
return
"""
        SELECT * FROM `content` AS t1
        JOIN (SELECT id FROM `content` ORDER BY id DESC
              LIMIT (page - 1) * pagesize, 1) AS t2
        WHERE t1.id <= t2.id ORDER BY t1.id DESC LIMIT pagesize;
"""
        # Check the record count.
cnt_items = dict()
conn = self._engine.connect()
        # Get the total row count of the table.
total_count = self._engine.execute(select([func.count()]).select_from(tbl_def)).scalar()
#print total_count # the total count of the table.
def build_uri(names_, row_):
items = {}
for pk_name in names_:
items[pk_name] = (row_[pk_name]).encode('utf8')
return urllib.urlencode(items)
def get_range_data(offset_, size_):
tbl_main = alias(tbl_def, 't')
join_condition = []
pk_names_desc = [name+" DESC" for name in pk_names]
sub_q = select(pk_cols).order_by(", ".join(pk_names_desc)).offset(offset_).limit(1).alias()
for pk_name in pk_names:
item = (tbl_main.c[pk_name] <= sub_q.c[pk_name])
join_condition.append(item)
if len(join_condition) > 1:
j = join(tbl_main, sub_q, and_(*join_condition))
else:
j = join(tbl_main, sub_q, join_condition[0])
return select([tbl_main]).select_from(j).order_by(", ".join(pk_names_desc)).limit(size_)
try:
for offset in range(0, total_count, pg_size):
stmt = get_range_data(offset, pg_size)
rs = conn.execute(stmt)
for row in rs:
#print jsonify(row)
if row['ArtCode'] == 'art0000014880':
#print row, row['Click']
print jsonify(row)
#print row['WorkName'], type(row['WorkName'])
#cnt_items[build_uri(pk_names, row)] = 1
finally:
conn.close()
        if False:  # kept to verify range-select against full-select results
conn = self._engine.connect()
try:
stmt = select([tbl_def])
rs = conn.execute(stmt)
for row in rs:
cnt_items[build_uri(pk_names, row)] += 1
finally:
conn.close()
# check
for k in cnt_items:
if cnt_items[k] != 2:
print k, '---------------'
#end of file
| {
"content_hash": "d9c676b1a335316ff358434a9d36b324",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 103,
"avg_line_length": 33.36734693877551,
"alnum_prop": 0.4975535168195719,
"repo_name": "nzinfo/ElasticSphinx",
"id": "e3daef8e45ca48da9ca0de1d40f267711d9791a7",
"size": "3380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "space/db_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32441"
}
],
"symlink_target": ""
} |
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import unittest
def build_and_run_program(place, batch_size, beam_size, stop_gradient=False):
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
np.random.seed(2)
x = layers.assign(
np.random.rand(batch_size, beam_size, 32).astype("float32")
)
indices = fluid.data(shape=[None, beam_size], dtype="int64", name="indices")
step_idx = layers.fill_constant(
shape=[1], dtype="int64", value=0, force_cpu=True
)
max_len = layers.fill_constant(
shape=[1], dtype="int64", value=10, force_cpu=True
)
cond = layers.less_than(x=step_idx, y=max_len)
while_op = layers.While(cond)
scores = layers.array_write(x, step_idx)
with while_op.block():
bs = layers.cast(layers.shape(x)[0], "int64")
for _ in range(20):
bs = layers.cast(bs, 'int64')
bs.stop_gradient = stop_gradient
batch_pos = layers.expand(
layers.unsqueeze(paddle.arange(0, bs, 1, dtype=bs.dtype), [1]),
[1, beam_size],
)
topk_coordinates = paddle.stack([batch_pos, indices], axis=2)
topk_coordinates.stop_gradient = stop_gradient
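            # topk_coordinates pairs each batch row with the beam indices to
            # pick, so gather_nd performs the usual beam-search re-ordering:
            # for every batch element it selects the beam entries named in
            # `indices`.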
score = layers.gather_nd(x, topk_coordinates)
layers.increment(x=step_idx, value=1.0, in_place=True)
layers.array_write(score, i=step_idx, array=scores)
length_cond = layers.less_than(x=step_idx, y=max_len)
layers.assign(length_cond, cond)
out = layers.tensor_array_to_tensor(scores, axis=0, use_stack=True)[0]
loss = layers.reduce_mean(out)
opt = fluid.optimizer.Adam(0.01)
opt.minimize(loss)
exe = fluid.Executor(place)
    # np.random.random_integers is deprecated; randint's upper bound is
    # exclusive, so high=beam_size covers the same inclusive 0..beam_size-1.
    data = np.random.randint(
        low=0, high=beam_size, size=(batch_size, beam_size)
    ).astype("int64")
(loss_val,) = exe.run(feed={"indices": data}, fetch_list=[loss])
return loss_val
class TestDynRNNStopGradient(unittest.TestCase):
def setUp(self):
self.batch_size = 20
self.beam_size = 64
def run_main(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
with fluid.scope_guard(fluid.Scope()):
value1 = build_and_run_program(
place, self.batch_size, self.beam_size, False
)
value2 = build_and_run_program(
place, self.batch_size, self.beam_size, True
)
np.testing.assert_array_equal(value1, value2)
def test_check_main(self):
places = [fluid.CPUPlace()]
if fluid.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.run_main(p)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "c57440aec4660aff43d1ecdfc65253ee",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 80,
"avg_line_length": 34.19047619047619,
"alnum_prop": 0.6058495821727019,
"repo_name": "luotao1/Paddle",
"id": "f3f971b5778d8fbefe16891c33caeaf94cae4844",
"size": "3483",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12); | {
"content_hash": "747a1144fda69a3afc218da8ef105821",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 168,
"avg_line_length": 38.285714285714285,
"alnum_prop": 0.7089552238805971,
"repo_name": "antoinecarme/pyaf",
"id": "728cc2faf127719952ecba99d9f146f5b012a276",
"size": "268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_Lag1Trend/cycle_30/ar_12/test_artificial_1024_Difference_Lag1Trend_30_12_20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ShopDesign'
db.create_table('stores_shopdesign', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('store', self.gf('django.db.models.fields.related.OneToOneField')(related_name='shop_design', unique=True, to=orm['stores.Store'])),
('background_image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
('is_repeated', self.gf('django.db.models.fields.BooleanField')(default=False)),
('background_color', self.gf('stores.fields.ColorField')(default='#FFFFFF', max_length=7)),
))
db.send_create_signal('stores', ['ShopDesign'])
def backwards(self, orm):
# Deleting model 'ShopDesign'
db.delete_table('stores_shopdesign')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'stores.category': {
'Meta': {'object_name': 'Category'},
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marker': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'stores.discount': {
'Meta': {'object_name': 'Discount'},
'for_additional_buyer': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'for_additional_item': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lower_bound': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discount_models'", 'to': "orm['stores.Store']"})
},
'stores.discountgroup': {
'Meta': {'object_name': 'DiscountGroup'},
'discount': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discount_groups'", 'to': "orm['stores.Discount']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'stores.item': {
'Meta': {'object_name': 'Item'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'discount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'discount_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'items'", 'null': 'True', 'to': "orm['stores.DiscountGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_out_of_stock': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['stores.Store']"})
},
'stores.itemimage': {
'Meta': {'object_name': 'ItemImage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['stores.Item']"})
},
'stores.shopdesign': {
'Meta': {'object_name': 'ShopDesign'},
'background_color': ('stores.fields.ColorField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'background_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_repeated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'store': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'shop_design'", 'unique': 'True', 'to': "orm['stores.Store']"})
},
'stores.shoppingregion': {
'Meta': {'object_name': 'ShoppingRegion'},
'center': ('django.contrib.gis.db.models.fields.PointField', [], {'spatial_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'zoom': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'stores.store': {
'Meta': {'object_name': 'Store'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stores'", 'to': "orm['stores.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'paypal_email': ('django.db.models.fields.EmailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stores.ShoppingRegion']", 'null': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'store'", 'unique': 'True', 'to': "orm['auth.User']"}),
'window_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['stores']
| {
"content_hash": "9f2767a52343921af47c1ee797911517",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 182,
"avg_line_length": 72.19852941176471,
"alnum_prop": 0.5529076280680314,
"repo_name": "softak/webfaction_demo",
"id": "d44e04b60ed25f6ed3466fff0a95d89d2501485b",
"size": "9837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/stores/migrations/0031_auto__add_shopdesign.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
import random
from string import ascii_letters
from django.contrib.auth import get_user_model
from tidings.compat import range
from tidings.models import Watch, WatchFilter
def user(save=False, **kwargs):
defaults = {'password':
'sha1$d0fcb$661bd5197214051ed4de6da4ecdabe17f5549c7c'}
if 'username' not in kwargs:
defaults['username'] = ''.join(random.choice(ascii_letters)
for x in range(15))
defaults.update(kwargs)
u = get_user_model()(**defaults)
if save:
u.save()
return u
def watch(save=False, **kwargs):
# TODO: better defaults, when there are events available.
defaults = {'user': kwargs.get('user') or user(save=True),
'is_active': True,
'secret': 'abcdefghjk'}
defaults.update(kwargs)
    # Use the constructor rather than objects.create() so that save=False
    # really does skip persisting the object.
    w = Watch(**defaults)
if save:
w.save()
return w
def watch_filter(save=False, **kwargs):
defaults = {'watch': kwargs.get('watch') or watch(save=True),
'name': 'test',
'value': 1234}
defaults.update(kwargs)
    # As above, avoid objects.create() so save=False is honored.
    f = WatchFilter(**defaults)
if save:
f.save()
return f
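# A sketch of typical use from a test case:
#   u = user(save=True)
#   w = watch(user=u, save=True)
#   f = watch_filter(watch=w, save=True)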
| {
"content_hash": "5e85d1e3b98dde913e7e17d5ff4978e0",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 70,
"avg_line_length": 28.186046511627907,
"alnum_prop": 0.6014851485148515,
"repo_name": "mozilla/django-tidings",
"id": "d95282bb046e9d2d0010355a7ad4c69b1274a5ba",
"size": "1212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1764"
},
{
"name": "Makefile",
"bytes": "2018"
},
{
"name": "Python",
"bytes": "75945"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
from dashboard.pinpoint.models.compare import kolmogorov_smirnov
from dashboard.pinpoint.models.compare import mann_whitney_u
from dashboard.pinpoint.models.compare import thresholds
DIFFERENT = 'different'
PENDING = 'pending'
SAME = 'same'
UNKNOWN = 'unknown'
class ComparisonResults(
collections.namedtuple(
'ComparisonResults',
('result', 'p_value', 'low_threshold', 'high_threshold'))):
__slots__ = ()
# TODO(https://crbug.com/1051710): Make this return all the values useful in
# decision making (and display).
def Compare(values_a, values_b, attempt_count, mode, magnitude):
"""Decide whether two samples are the same, different, or unknown.
Arguments:
values_a: A list of sortable values. They don't need to be numeric.
values_b: A list of sortable values. They don't need to be numeric.
attempt_count: The average number of attempts made.
mode: 'functional' or 'performance'. We use different significance
thresholds for each type.
magnitude: An estimate of the size of differences to look for. We need more
values to find smaller differences. If mode is 'functional', this is the
failure rate, a float between 0 and 1. If mode is 'performance', this is a
multiple of the interquartile range (IQR).
Returns:
A tuple `ComparisonResults` which contains the following elements:
* result: one of the following values:
DIFFERENT: The samples are unlikely to come from the same
distribution, and are therefore likely different. Reject
the null hypothesis.
SAME : The samples are unlikely to come from distributions that
differ by the given magnitude. Cannot reject the null
hypothesis.
UNKNOWN : Not enough evidence to reject either hypothesis. We should
collect more data before making a final decision.
* p_value: the consolidated p-value for the statistical tests used in the
implementation.
* low_threshold: the `alpha` where if the p-value is lower means we can
reject the null hypothesis.
* high_threshold: the `alpha` where if the p-value is lower means we need
more information to make a definitive judgement.
"""
low_threshold = thresholds.LowThreshold()
high_threshold = thresholds.HighThreshold(mode, magnitude, attempt_count)
if not (values_a and values_b):
# A sample has no values in it.
return ComparisonResults(UNKNOWN, None, low_threshold, high_threshold)
# MWU is bad at detecting changes in variance, and K-S is bad with discrete
# distributions. So use both. We want low p-values for the below examples.
# a b MWU(a, b) KS(a, b)
# [0]*20 [0]*15+[1]*5 0.0097 0.4973
# range(10, 30) range(10)+range(30, 40) 0.4946 0.0082
p_value = min(
kolmogorov_smirnov.KolmogorovSmirnov(values_a, values_b),
mann_whitney_u.MannWhitneyU(values_a, values_b))
if p_value <= low_threshold:
# The p-value is less than the significance level. Reject the null
# hypothesis.
return ComparisonResults(DIFFERENT, p_value, low_threshold, high_threshold)
  if p_value <= high_threshold:
# The p-value is not less than the significance level, but it's small
# enough to be suspicious. We'd like to investigate more closely.
return ComparisonResults(UNKNOWN, p_value, low_threshold, high_threshold)
# The p-value is quite large. We're not suspicious that the two samples might
# come from different distributions, and we don't care to investigate more.
return ComparisonResults(SAME, p_value, low_threshold, high_threshold)
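# A minimal usage sketch (the sample values and attempt count are made up):
# two well-separated performance samples should ordinarily come back
# DIFFERENT.
#   results = Compare(values_a=[10.0, 10.1, 10.2] * 10,
#                     values_b=[12.0, 12.1, 12.2] * 10,
#                     attempt_count=30, mode='performance', magnitude=1.0)
#   results.result  # expected: DIFFERENT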
| {
"content_hash": "d5b55ec573b0b8750b2e23f1438d68c7",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 80,
"avg_line_length": 46.04651162790697,
"alnum_prop": 0.681060606060606,
"repo_name": "endlessm/chromium-browser",
"id": "be7928a284cf4395aca01482430ad092ce65d3f6",
"size": "4123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/dashboard/dashboard/pinpoint/models/compare/compare.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import sys
from data_gen import *
from knn import *
import random as rd
import util as ut
import scenario_util as su
# Read the parameter file named on the command line, skipping comment lines.
params = ut.read_params(sys.argv[1], ignore_lines = '#')
# Look up a parameter by name, falling back to the specified default
# when it is absent from the params file.
def p(param_name, default):
    try:
        return params[param_name]
    except KeyError:
        return default
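# For reference, the params file is expected to yield a dict along these
# lines (keys mirror the lookups below; values shown are illustrative):
#   {'num_trials': 5,
#    'baseline_prob': 0.02,
#    'user_attribute_spec': (4, 2, 4),
#    'msg_attribute_spec': (4, 2, 4),
#    'num_users': 1000,
#    'output_file': 'results.txt'}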
# Main parameters.
num_trials = p('num_trials', 5)
baseline = p('baseline_prob', 0.02)
num_user_atts, min_user_att_levels, max_user_att_levels = p('user_attribute_spec', (4, 2, 4))
num_msg_atts, min_msg_att_levels, max_msg_att_levels = p('msg_attribute_spec', (4, 2, 4))
num_propensity_groups = p('num_propensity_groups', 5)
min_group_user_atts, max_group_user_atts = p('minmax_user_propensity_attrs_involved', (3, 4))
min_group_msg_atts, max_group_msg_atts = p('minmax_msg_propensity_attrs_involved', (2, 4))
min_group_pos_prob, max_group_pos_prob = p('minmax_propensity_group_response_prob', (0.2, 0.85))
num_users = p('num_users', 1000)
num_test_messages = p('num_test_messages', 100)
output_file = p('output_file', None)
# Initializer function
def trial_init(recdr, logr):
logr.log('Initializing new trial...', 'standard')
b = DataGenerator()
b.set_baseline_response_prob(baseline)
b.add_random_user_attrs(num_user_atts, min_user_att_levels, max_user_att_levels)
b.add_random_inter_attrs(num_msg_atts, min_msg_att_levels, max_msg_att_levels)
templates = b.set_random_propensities(num_propensity_groups,
min_group_user_atts, max_group_user_atts,
min_group_msg_atts, max_group_msg_atts,
min_group_pos_prob, max_group_pos_prob)
# -> Returns: a pair (user templates, interaction templates)
logr.log('Generating data...', 'standard')
messages = b.gen_random_inters(num_test_messages)
users = b.gen_random_users(num_users)
rows = ut.unzip(b.gen_random_rows_from(users, messages))
logr.log('Number of rows: ' + str(len(rows)), 'standard')
# Split data into train, calibration, and test.
train, calibrate, test = ut.split_data(rows, 0.5, 0.25, 0.25)
calibration_users = map(lambda (u, m, r): u, calibrate)
test_users = map(lambda (u, m, r): u, test)
controls = su.build_std_control_solvers(calibrate, b, messages, 15)
treatments = su.build_std_knn_optims(train, calibrate, b, recorder, 1, 15)
solvers = controls + treatments
return (train, test_users, b, solvers)
logger = su.BasicLogger()
recorder = su.ScenarioRecorder()
su.run_trials(trial_init, su.standard_analyzer_f, num_trials, recorder, logger)
if output_file != None:
logger.write(output_file)
| {
"content_hash": "08a50090ae1db771d8131df46a383557",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 96,
"avg_line_length": 40.303030303030305,
"alnum_prop": 0.706766917293233,
"repo_name": "chrisgarcia001/Jepson-2014-2015",
"id": "641af19aa742b0b263e6a050e15cbac771c8be43",
"size": "3033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scenario_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "638"
},
{
"name": "Python",
"bytes": "35727"
}
],
"symlink_target": ""
} |