text stringlengths 4 1.02M | meta dict |
|---|---|
import curio
from ... import gvars
from ...utils import pack_addr, unpack_addr
from ..base.udpclient import UDPClient
class SSUDPClient(UDPClient):
    """Shadowsocks UDP client.

    Wraps outbound datagrams in the shadowsocks UDP format
    (iv + encrypt(packed_target_address + data)) and relays decrypted
    replies back to the local peer.
    """
    proto = "SS(UDP)"
    async def sendto(self, data, addr):
        """Encrypt *data* destined for *addr* and send it to the server."""
        # Remember the logical target so replies can be relayed back for it.
        self.target_addr = addr
        # Fresh IV/encryptor per datagram: shadowsocks UDP is stateless.
        iv, encrypt = self.ns.cipher.make_encrypter()
        payload = iv + encrypt(pack_addr(addr) + data)
        # The datagram goes to the shadowsocks server (ns.bind_addr),
        # not to addr itself; addr is embedded inside the payload.
        await self.sock.sendto(payload, self.ns.bind_addr)
    def _unpack(self, data):
        """Decrypt one server datagram; return (origin_addr, plaintext)."""
        iv = data[: self.ns.cipher.IV_SIZE]
        decrypt = self.ns.cipher.make_decrypter(iv)
        data = decrypt(data[self.ns.cipher.IV_SIZE :])
        addr, payload = unpack_addr(data)
        return addr, payload
    async def _relay(self, addr, sendfrom):
        """Forward decrypted server replies via *sendfrom* until cancelled."""
        try:
            while True:
                data, raddr = await self.sock.recvfrom(gvars.PACKET_SIZE)
                # The embedded origin address is discarded; the reply is
                # always delivered back to the original local peer *addr*.
                _, payload = self._unpack(data)
                await sendfrom(payload, addr)
        except curio.errors.CancelledError:
            # Normal shutdown: the owning task cancels the relay.
            pass
| {
"content_hash": "82a79d1e6f7a70baed27ece2505f14b3",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 31.516129032258064,
"alnum_prop": 0.6059365404298874,
"repo_name": "guyingbo/shadowproxy",
"id": "507e0946d7f5bc4edcc137902d600a0012720dd1",
"size": "977",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shadowproxy/proxies/shadowsocks/udpclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127166"
},
{
"name": "Shell",
"bytes": "1809"
}
],
"symlink_target": ""
} |
"""Constants used by Advantage Air integration."""
DOMAIN = "advantage_air"
ADVANTAGE_AIR_RETRY = 5
ADVANTAGE_AIR_STATE_OPEN = "open"
ADVANTAGE_AIR_STATE_CLOSE = "close"
ADVANTAGE_AIR_STATE_ON = "on"
ADVANTAGE_AIR_STATE_OFF = "off"
| {
"content_hash": "6678a8fae364878b80cfdab5b86ec0eb",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 50,
"avg_line_length": 33.142857142857146,
"alnum_prop": 0.7327586206896551,
"repo_name": "sdague/home-assistant",
"id": "078c266bfb0d03456b4eacc54cbb665b4eebd7ca",
"size": "232",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/advantage_air/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "27869189"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class SoaRecord(Model):
    """An SOA record.

    :param host: The domain name of the authoritative name server for this SOA
     record.
    :type host: str
    :param email: The email contact for this SOA record.
    :type email: str
    :param serial_number: The serial number for this SOA record.
    :type serial_number: long
    :param refresh_time: The refresh value for this SOA record.
    :type refresh_time: long
    :param retry_time: The retry time for this SOA record.
    :type retry_time: long
    :param expire_time: The expire time for this SOA record.
    :type expire_time: long
    :param minimum_ttl: The minimum value for this SOA record. By convention
     this is used to determine the negative caching duration.
    :type minimum_ttl: long
    """

    # Maps each attribute to its JSON wire-format key and msrest type,
    # consumed by the msrest Serializer/Deserializer.
    _attribute_map = {
        'host': {'key': 'host', 'type': 'str'},
        'email': {'key': 'email', 'type': 'str'},
        'serial_number': {'key': 'serialNumber', 'type': 'long'},
        'refresh_time': {'key': 'refreshTime', 'type': 'long'},
        'retry_time': {'key': 'retryTime', 'type': 'long'},
        'expire_time': {'key': 'expireTime', 'type': 'long'},
        'minimum_ttl': {'key': 'minimumTTL', 'type': 'long'},
    }

    def __init__(self, host=None, email=None, serial_number=None, refresh_time=None, retry_time=None, expire_time=None, minimum_ttl=None):
        # NOTE(review): Model.__init__ is not invoked here, matching
        # autorest-generated code of this vintage — confirm against the
        # msrest version in use before adding a super() call.
        self.host = host
        self.email = email
        self.serial_number = serial_number
        self.refresh_time = refresh_time
        self.retry_time = retry_time
        self.expire_time = expire_time
        self.minimum_ttl = minimum_ttl
| {
"content_hash": "404f7aa65a9f68486b665bfebf7719cb",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 138,
"avg_line_length": 39.26190476190476,
"alnum_prop": 0.6252274105518496,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "fec121ed625d1e5fb016984c5ee69ab95ea5ca30",
"size": "2123",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "azure-mgmt-dns/azure/mgmt/dns/models/soa_record.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestVolumeMigrateRetypeAttached(manager.ScenarioTest):
    """This test case attempts to reproduce the following steps:

    * Create 2 volume types representing 2 different backends
    * Create in Cinder some bootable volume importing a Glance image using
      volume_type_1
    * Boot an instance from the bootable volume
    * Write to the volume
    * Perform a cinder retype --on-demand of the volume to type of backend #2
    * Check written content of migrated volume
    * Check the type of the volume has been updated.
    * Check the volume is still in-use and the migration was successful.
    * Check that the same volume is attached to the instance.
    """

    # Admin credentials are needed for retype/migrate and list_hosts.
    credentials = ['primary', 'admin']

    @classmethod
    def setup_clients(cls):
        """Add an admin volumes client to the default scenario clients."""
        super(TestVolumeMigrateRetypeAttached, cls).setup_clients()
        cls.admin_volumes_client = cls.os_admin.volumes_client_latest

    @classmethod
    def skip_checks(cls):
        """Skip unless Cinder multi-backend with >= 2 backends is configured."""
        super(TestVolumeMigrateRetypeAttached, cls).skip_checks()
        if not CONF.volume_feature_enabled.multi_backend:
            raise cls.skipException("Cinder multi-backend feature disabled")
        if len(set(CONF.volume.backend_names)) < 2:
            raise cls.skipException("Requires at least two different "
                                    "backend names")

    def _boot_instance_from_volume(self, vol_id, keypair, security_group):
        """Boot a server from volume *vol_id* mapped as vda (kept on delete)."""
        key_name = keypair['name']
        security_groups = [{'name': security_group['name']}]
        # delete_on_termination=False: the volume must survive the server
        # so its contents can be verified after migration.
        block_device_mapping = [{'device_name': 'vda', 'volume_id': vol_id,
                                 'delete_on_termination': False}]
        return self.create_server(image_id='',
                                  key_name=key_name,
                                  security_groups=security_groups,
                                  block_device_mapping=block_device_mapping)

    def _create_volume_types(self):
        """Create one volume type per configured backend.

        Returns a (source, dest) pair of dicts with 'name' and 'host' keys.
        """
        backend_names = CONF.volume.backend_names
        backend_source = backend_names[0]
        backend_dest = backend_names[1]
        source_body = self.create_volume_type(backend_name=backend_source)
        dest_body = self.create_volume_type(backend_name=backend_dest)
        LOG.info("Created Volume types: %(src)s -> %(src_backend)s, %(dst)s "
                 "-> %(dst_backend)s", {'src': source_body['name'],
                                        'src_backend': backend_source,
                                        'dst': dest_body['name'],
                                        'dst_backend': backend_dest})
        return ({'name': source_body['name'], 'host': backend_source},
                {'name': dest_body['name'], 'host': backend_dest})

    def _volume_retype_with_migration(self, volume_id, new_volume_type):
        """Retype the volume with on-demand migration; wait for completion."""
        # NOTE: The 'on-demand' migration requires admin operation, so
        # admin_volumes_client() should be used here.
        migration_policy = 'on-demand'
        self.admin_volumes_client.retype_volume(
            volume_id, new_type=new_volume_type,
            migration_policy=migration_policy)
        waiters.wait_for_volume_retype(self.volumes_client,
                                       volume_id, new_volume_type)

    @decorators.attr(type='slow')
    @decorators.idempotent_id('deadd2c2-beef-4dce-98be-f86765ff311b')
    @utils.services('compute', 'volume')
    def test_volume_retype_attached(self):
        LOG.info("Creating keypair and security group")
        keypair = self.create_keypair()
        security_group = self._create_security_group()

        # create volume types
        LOG.info("Creating Volume types")
        source_type, dest_type = self._create_volume_types()

        # create an instance from volume
        LOG.info("Booting instance from volume")
        volume_id = self.create_volume(imageRef=CONF.compute.image_ref,
                                       volume_type=source_type['name'])['id']
        instance = self._boot_instance_from_volume(volume_id, keypair,
                                                   security_group)

        # write content to volume on instance
        LOG.info("Setting timestamp in instance %s", instance['id'])
        ip_instance = self.get_server_ip(instance)
        timestamp = self.create_timestamp(ip_instance,
                                          private_key=keypair['private_key'],
                                          server=instance)

        # retype volume with migration from backend #1 to backend #2
        LOG.info("Retyping Volume %s to new type %s", volume_id,
                 dest_type['name'])
        # This method calls for the retype of the volume before calling a
        # waiter that asserts that the volume type has changed successfully.
        self._volume_retype_with_migration(volume_id, dest_type['name'])

        # check the content of written file
        LOG.info("Getting timestamp in postmigrated instance %s",
                 instance['id'])
        timestamp2 = self.get_timestamp(ip_instance,
                                        private_key=keypair['private_key'],
                                        server=instance)
        self.assertEqual(timestamp, timestamp2)

        # Assert that the volume is on the new host, is still in-use and has a
        # migration_status of success
        volume = self.admin_volumes_client.show_volume(volume_id)['volume']
        # dest_type is host@backend, os-vol-host-attr:host is host@backend#type
        self.assertIn(dest_type['host'], volume['os-vol-host-attr:host'])
        self.assertEqual('in-use', volume['status'])
        self.assertEqual('success', volume['migration_status'])

        # Assert that the same volume id is attached to the instance, ensuring
        # the os-migrate_volume_completion Cinder API has been called.
        attached_volumes = self.servers_client.list_volume_attachments(
            instance['id'])['volumeAttachments']
        self.assertEqual(volume_id, attached_volumes[0]['id'])

    @decorators.attr(type='slow')
    @decorators.idempotent_id('fe47b1ed-640e-4e3b-a090-200e25607362')
    @utils.services('compute', 'volume')
    def test_volume_migrate_attached(self):
        LOG.info("Creating keypair and security group")
        keypair = self.create_keypair()
        security_group = self._create_security_group()

        LOG.info("Creating volume")
        # Create a unique volume type to avoid using the backend default
        migratable_type = self.create_volume_type()['name']
        volume_id = self.create_volume(imageRef=CONF.compute.image_ref,
                                       volume_type=migratable_type)['id']
        volume = self.admin_volumes_client.show_volume(volume_id)

        LOG.info("Booting instance from volume")
        instance = self._boot_instance_from_volume(volume_id, keypair,
                                                   security_group)

        # Identify the source and destination hosts for the migration
        src_host = volume['volume']['os-vol-host-attr:host']

        # Select the first c-vol host that isn't hosting the volume as the dest
        # host['host_name'] should take the format of host@backend.
        # src_host should take the format of host@backend#type
        hosts = self.admin_volumes_client.list_hosts()['hosts']
        for host in hosts:
            if (host['service'] == 'cinder-volume' and
                    not src_host.startswith(host['host_name'])):
                dest_host = host['host_name']
                break

        ip_instance = self.get_server_ip(instance)
        timestamp = self.create_timestamp(ip_instance,
                                          private_key=keypair['private_key'],
                                          server=instance)

        LOG.info("Migrating Volume %s from host %s to host %s",
                 volume_id, src_host, dest_host)
        self.admin_volumes_client.migrate_volume(volume_id, host=dest_host)

        # This waiter asserts that the migration_status is success and that
        # the volume has moved to the dest_host
        waiters.wait_for_volume_migration(self.admin_volumes_client, volume_id,
                                          dest_host)

        # check the content of written file
        LOG.info("Getting timestamp in postmigrated instance %s",
                 instance['id'])
        timestamp2 = self.get_timestamp(ip_instance,
                                        private_key=keypair['private_key'],
                                        server=instance)
        self.assertEqual(timestamp, timestamp2)

        # Assert that the volume is in-use
        volume = self.admin_volumes_client.show_volume(volume_id)['volume']
        self.assertEqual('in-use', volume['status'])

        # Assert that the same volume id is attached to the instance, ensuring
        # the os-migrate_volume_completion Cinder API has been called
        attached_volumes = self.servers_client.list_volume_attachments(
            instance['id'])['volumeAttachments']
        attached_volume_id = attached_volumes[0]['id']
        self.assertEqual(volume_id, attached_volume_id)
| {
"content_hash": "a62e85bfdfd6d7b2b9ee433b54668cc8",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 79,
"avg_line_length": 46.166666666666664,
"alnum_prop": 0.6036313442344446,
"repo_name": "cisco-openstack/tempest",
"id": "106500e5ad0520f1b0f26ba68601dfdaa5080bf4",
"size": "9991",
"binary": false,
"copies": "2",
"ref": "refs/heads/proposed",
"path": "tempest/scenario/test_volume_migrate_attached.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4431271"
},
{
"name": "Shell",
"bytes": "7435"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import # for logging import below
import datetime
import logging
from django.conf import settings
from django.shortcuts import redirect, resolve_url
from django.template.defaultfilters import date as date_filter
from django.utils import six
from django.utils.http import is_safe_url
from django.utils.module_loading import import_string
from django.utils.text import slugify as django_slugify
from django.utils.timezone import get_current_timezone, is_naive, make_aware
from unidecode import unidecode
def default_slugifier(value):
    """Oscar's default slugify function.

    Delegates to Django's ``slugify``.  ``allow_unicode`` is passed only
    when ``OSCAR_SLUG_ALLOW_UNICODE`` is enabled, which keeps backwards
    compatibility with Django<1.9 (where the keyword does not exist).
    Non-ASCII input is handled upstream by :func:`slugify` via unidecode
    when unicode slugs are disallowed.
    """
    if not settings.OSCAR_SLUG_ALLOW_UNICODE:
        return django_slugify(value)
    return django_slugify(value, allow_unicode=True)
def slugify(value):
    """
    Slugify a string (even if it contains non-ASCII chars)
    """
    # Apply the project's character remapping first so that meaningful
    # characters survive slugification (e.g. 'c++' -> 'cpp', not 'c').
    for original, replacement in settings.OSCAR_SLUG_MAP.items():
        value = value.replace(original, replacement)

    # A custom slugify callable may be configured, preferably as a dotted
    # path string; resolve it if so.
    slugifier = getattr(settings, 'OSCAR_SLUG_FUNCTION', default_slugifier)
    if isinstance(slugifier, six.string_types):
        slugifier = import_string(slugifier)

    # When unicode slugs are disallowed, transliterate to ASCII first.
    if not settings.OSCAR_SLUG_ALLOW_UNICODE:
        value = unidecode(six.text_type(value))
    value = slugifier(six.text_type(value))

    # Strip configured stopwords together with their adjoining hyphen.
    for stopword in settings.OSCAR_SLUG_BLACKLIST:
        value = value.replace(stopword + '-', '')
        value = value.replace('-' + stopword, '')

    return value
def format_datetime(dt, format=None):
    """Convert *dt* to the current timezone and render it as a string.

    Use this instead of ``django.core.templatefilters.date``, which
    expects localtime.

    :param format: Common will be settings.DATETIME_FORMAT or
        settings.DATE_FORMAT, or the resp. shorthands
        ('DATETIME_FORMAT', 'DATE_FORMAT')
    """
    current_tz = get_current_timezone()
    if is_naive(dt):
        localtime = make_aware(dt, current_tz)
        logging.warning(
            "oscar.core.utils.format_datetime received native datetime")
    else:
        localtime = dt.astimezone(current_tz)
    return date_filter(localtime, format)
def datetime_combine(date, time):
    """Timezone aware version of `datetime.datetime.combine`"""
    naive = datetime.datetime.combine(date, time)
    return make_aware(naive, get_current_timezone())
def safe_referrer(request, default):
    """Return HTTP_REFERER if set and safe, the resolved *default* otherwise.

    The default URL can be a model with get_absolute_url defined, a urlname
    or a regular URL.  Empty string and ``None`` defaults are passed
    through unchanged.
    """
    referrer = request.META.get('HTTP_REFERER')
    if referrer and is_safe_url(referrer, request.get_host()):
        return referrer
    # resolve_url accepts a model instance, a Django URL name or a URL;
    # falsy defaults ('' / None) are deliberately returned as-is.
    return resolve_url(default) if default else default
def redirect_to_referrer(request, default):
    """Redirect back to a safe HTTP_REFERER, falling back to *default*.

    Returns a HttpResponseRedirect to HTTP_REFERER if it exists and is a
    safe URL; to the default URL otherwise.
    """
    target = safe_referrer(request, default)
    return redirect(target)
def get_default_currency():
    """Return ``OSCAR_DEFAULT_CURRENCY`` for use as a currency-field default.

    Referencing the setting through a callable stops Django's migration
    autodetector from generating a new migration every time
    ``OSCAR_DEFAULT_CURRENCY`` changes.
    """
    return settings.OSCAR_DEFAULT_CURRENCY
| {
"content_hash": "2b33fcd538d917be14140b7158434a72",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 77,
"avg_line_length": 35.55,
"alnum_prop": 0.6997187060478199,
"repo_name": "sonofatailor/django-oscar",
"id": "27c44354e0e11ab31fe58ab5ebb57bafaa81b1ed",
"size": "4266",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/oscar/core/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "939276"
},
{
"name": "HTML",
"bytes": "522590"
},
{
"name": "JavaScript",
"bytes": "271655"
},
{
"name": "Makefile",
"bytes": "2322"
},
{
"name": "Python",
"bytes": "1887022"
},
{
"name": "Shell",
"bytes": "1642"
}
],
"symlink_target": ""
} |
"""
SmallNet.py is a class that provides a network for Person localization
Created 6/13/17.
"""
__author__ = "Alexander Ponamarev"
__email__ = "alex.ponamaryov@gmail.com"
import tensorflow as tf
from collections import namedtuple
from tensorflow import name_scope, variable_scope, stop_gradient
from .ObjectDetection import ObjectDetectionNet
Point = namedtuple('Point',['x', 'y'])
class AdvancedNet(ObjectDetectionNet):
    """Detection network for person localization (see module docstring)."""

    def __init__(self, labels_provided, imshape, lr=1e-3, activations='elu', width=1.0):
        # width scales the channel count of all separable-conv towers.
        self.width=width
        self.imshape = Point(*imshape)
        # Output grid is the input downsampled 16x (four stride-2 stages).
        self.outshape = Point(int(imshape[0] / 16), int(imshape[1] / 16))
        super().__init__(labels_provided, lr)
        # NOTE(review): assigned after super().__init__ — confirm the base
        # class does not read default_activation during its own init.
        self.default_activation = activations

    def _add_featuremap(self):
        """Build the backbone and top-down path, setting self.feature_map."""
        # Short aliases for the layer helpers provided by the base class.
        separable_conv = self._separable_conv2d
        conv = self._conv2d
        deconv = self._deconv
        concat = self._concat
        maxpool = self._max_pool
        bn = self._batch_norm
        fc = self._fullyconnected
        drop_out = self._drop_out_conv

        def upsampling(input, filters, name):
            # Stride-2 separable conv followed by three parallel towers of
            # depths 4/2/1, concatenated inception-style, then fused.
            # NOTE(review): despite the name this halves spatial resolution
            # (strides=2) — confirm intent.
            with variable_scope('upsampling_' + name):
                input = separable_conv(input, filters, strides=2, BN_FLAG=False, name='upsampling')
                with variable_scope('tower1'):
                    t1 = separable_conv(input, int(filters*self.width), BN_FLAG=False, name='conv1')
                    t1 = separable_conv(t1, int(filters*self.width), BN_FLAG=False, name='conv2')
                    t1 = separable_conv(t1, int(filters*self.width), BN_FLAG=False, name='conv3')
                    t1 = separable_conv(t1, int(filters*self.width), BN_FLAG=False, name='conv4')
                with variable_scope('tower2'):
                    t2 = separable_conv(input, int(filters*self.width), BN_FLAG=False, name='conv1')
                    t2 = separable_conv(t2, int(filters*self.width), BN_FLAG=False, name='conv2')
                with variable_scope('tower3'):
                    t3 = separable_conv(input, int(filters*self.width), BN_FLAG=False, name='regularization')
                c = concat([t1, t2, t3], axis=3, name='concat')
                c = separable_conv(c, int(filters*self.width), BN_FLAG=False, name="output")
            return c

        def downsampling(input, filters, name):
            # Transposed conv (x2 spatial) followed by a separable conv.
            # NOTE(review): despite the name this doubles resolution — it is
            # the top-down/upsampling path of the pyramid.
            with variable_scope('downsampling_' + name):
                d = deconv(input, int(filters*self.width), [3,3], [2,2], BN_FLAG=False, padding='SAME')
                d = separable_conv(d, int(filters*self.width), BN_FLAG=False, name='output')
            return d

        def lateral_connection(td, dt, filters, name):
            # FPN-style merge: top-down tensor (td) concatenated with a
            # stop-gradient lateral projection of the bottom-up tensor (dt).
            with variable_scope('lateral_'+name):
                dt = stop_gradient(dt, name="stop_G")
                l = separable_conv(dt, int(filters*self.width), BN_FLAG=False, name="L")
                output = concat((td, l))
                return separable_conv(output, int(filters*self.width), BN_FLAG=False, name="force_choice")

        inputs = self.input_img
        with name_scope('inputs'):
            # Rescale pixel values from [0, 255] to [-1, 1].
            inputs = tf.subtract( tf.divide(inputs, 127.5), 1.0, name="img_norm")

        # Bottom-up (encoder) path; c1..c6 shrink spatially, grow in channels.
        c1 = conv(inputs, 8, strides=2, BN_FLAG=False, name='conv1')
        c2 = separable_conv(c1, 32, BN_FLAG=False, strides=2, name='conv2')
        c3 = upsampling(c2, 64, name="up3")
        c4 = separable_conv(c3, 128, BN_FLAG=False, strides=2, name='conv4')
        c5 = upsampling(c4, 256, name="up5")
        c6 = separable_conv(c5, 512, BN_FLAG=False, strides=2, name='conv6')

        # Top-down (decoder) path with lateral connections.
        d5 = downsampling(c6, 256, name="down5")
        d5 = lateral_connection(d5, c5, 256, name="l5")
        d4 = downsampling(d5, 128, name="down4")
        d4 = lateral_connection(d4, c4, 128, name="l4")
        d3 = downsampling(d4, 64, name="down3")
        d3 = lateral_connection(d3, c3, 64, name="l3")
        d3 = drop_out(d3, "d3_dropout")

        # Final head: K anchors x (n_classes + 4 box coords + 1 confidence).
        self.feature_map = separable_conv(d3, self.K * (self.n_classes + 4 + 1), strides=2, name='feature_map')
"content_hash": "fd993c576ff2beb9a00c319ea9b465bc",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 111,
"avg_line_length": 38.65384615384615,
"alnum_prop": 0.591044776119403,
"repo_name": "aponamarev/FootBallTracking",
"id": "4649e8453e27e2f18f8221c72a6975baf3bf8e46",
"size": "4042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/AdvancedNet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107786"
}
],
"symlink_target": ""
} |
class Solution:
    """Stack supporting push/pop/top plus retrieval of the minimum element."""

    def __init__(self):
        # Elements in push order; the top is the last item.
        self.stack = []

    def push(self, node):
        """Push *node* onto the stack."""
        self.stack.append(node)

    def pop(self):
        """Remove and return the top element.

        Raises IndexError when the stack is empty (same as list.pop).
        """
        return self.stack.pop()

    def top(self):
        """Return the top element without removing it, or None when empty.

        Bug fix: the original tested ``len(stack)`` — an unqualified name
        that raised NameError on every call — instead of ``self.stack``.
        """
        if len(self.stack) == 0:
            return None
        return self.stack[-1]

    def min(self):
        """Return the smallest element (ValueError when empty).

        O(n) scan; an auxiliary min-stack would give O(1), but the simple
        form keeps pop() trivially correct and matches the original contract.
        """
        return min(self.stack)
"content_hash": "0c6de2a249f86d715428b784e48005cf",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 39,
"avg_line_length": 24.941176470588236,
"alnum_prop": 0.5235849056603774,
"repo_name": "linjinjin123/leetcode",
"id": "7878e61f2829194437c050e2f41dcac4a968e1a5",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "剑指offer/包含min函数的栈.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "54722"
},
{
"name": "Shell",
"bytes": "262"
}
],
"symlink_target": ""
} |
"""Manual test driver: launch a TaskCommando process against JEDI interfaces.

Usage: taskCommandoTest.py [vo] [taskType]   (both default to 'any')
"""
# Bug fix: the script used sys.argv without importing sys; the bare
# ``except:`` swallowed the resulting NameError, so the command-line
# arguments were silently ignored and both values were always 'any'.
import sys
import multiprocessing

from pandajedi.jedicore.JediTaskBufferInterface import JediTaskBufferInterface
from pandajedi.jediddm.DDMInterface import DDMInterface
from pandajedi.jediorder import TaskCommando

# Task buffer (database) interface.
tbIF = JediTaskBufferInterface()
tbIF.setupInterface()

# Distributed data management interface.
ddmIF = DDMInterface()
ddmIF.setupInterface()

parent_conn, child_conn = multiprocessing.Pipe()

# Optional CLI overrides; catch only IndexError so real errors surface.
try:
    testVO = sys.argv[1]
except IndexError:
    testVO = 'any'
try:
    testTaskType = sys.argv[2]
except IndexError:
    testTaskType = 'any'

taskCommando = multiprocessing.Process(target=TaskCommando.launcher,
                                       args=(child_conn, tbIF, ddmIF,
                                             testVO, testTaskType))
taskCommando.start()
| {
"content_hash": "f0717bcd31e6a7859ef60a5299e869af",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 23.766666666666666,
"alnum_prop": 0.6956521739130435,
"repo_name": "RRCKI/panda-jedi",
"id": "f434390a9203e47b37a7cf2424ada68ab4f927df",
"size": "713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandajedi/jeditest/taskCommandoTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1452255"
},
{
"name": "Shell",
"bytes": "1892"
}
],
"symlink_target": ""
} |
"Tests for the `btclib.tx_in` module."
import json
from os import path
import pytest
from btclib.exceptions import BTClibValueError
from btclib.tx.out_point import OutPoint
from btclib.tx.tx import Tx
def test_out_point() -> None:
    """Accessors and round-trips for default and explicit OutPoints."""
    # The default OutPoint is the coinbase marker: null tx_id, vout 0xFFFFFFFF.
    out_point = OutPoint()
    assert out_point.tx_id == b"\x00" * 32
    assert out_point.vout == 0xFFFFFFFF
    # hash/n are the legacy aliases for tx_id (as int) and vout.
    assert out_point.hash == int.from_bytes(out_point.tx_id, "big", signed=False)
    assert out_point.n == out_point.vout
    assert out_point.is_coinbase()
    # serialize/parse and to_dict/from_dict must round-trip to equality.
    assert out_point == OutPoint.parse(out_point.serialize())
    assert out_point == OutPoint.from_dict(out_point.to_dict())
    # A real previous-output reference is not a coinbase.
    tx_id = "d5b5982254eebca64e4b42a3092a10bfb76ab430455b2bf0cf7c4f7f32db1c2e"
    vout = 0
    out_point = OutPoint(tx_id, vout)
    assert out_point.tx_id.hex() == tx_id
    assert out_point.vout == vout
    assert out_point.hash == int.from_bytes(out_point.tx_id, "big", signed=False)
    assert out_point.n == out_point.vout
    assert not out_point.is_coinbase()
    assert out_point == OutPoint.parse(out_point.serialize())
    assert out_point == OutPoint.from_dict(out_point.to_dict())
def test_dataclasses_json_dict_out_point() -> None:
    """to_dict/from_dict round-trip, both in memory and via a JSON file."""
    # Parse a real transaction fixture and grab its first input's prev_out.
    fname = "d4f3c2c3c218be868c77ae31bedb497e2f908d6ee5bbbe91e4933e6da680c970.bin"
    filename = path.join(path.dirname(__file__), "_data", fname)
    with open(filename, "rb") as binary_file_:
        temp = Tx.parse(binary_file_.read())
    out_point_data = temp.vin[0].prev_out
    # dataclass
    assert isinstance(out_point_data, OutPoint)
    # Tx to/from dict
    out_point_dict = out_point_data.to_dict()
    assert isinstance(out_point_dict, dict)
    assert out_point_data == OutPoint.from_dict(out_point_dict)
    datadir = path.join(path.dirname(__file__), "_generated_files")
    # Tx dict to/from dict file
    filename = path.join(datadir, "out_point.json")
    with open(filename, "w", encoding="ascii") as file_:
        json.dump(out_point_dict, file_, indent=4)
    with open(filename, "r", encoding="ascii") as file_:
        out_point_dict2 = json.load(file_)
    assert isinstance(out_point_dict2, dict)
    assert out_point_dict == out_point_dict2
def test_invalid_outpoint() -> None:
    """assert_valid must reject malformed tx_ids, vouts, and pseudo-coinbases."""
    # tx_id shorter than 32 bytes
    out_point = OutPoint(b"\x01" * 31, 18, check_validity=False)
    with pytest.raises(BTClibValueError, match="invalid OutPoint tx_id: "):
        out_point.assert_valid()
    # vout outside the unsigned 32-bit range (negative / too large)
    out_point = OutPoint(b"\x01" * 32, -1, check_validity=False)
    with pytest.raises(BTClibValueError, match="invalid vout: "):
        out_point.assert_valid()
    out_point = OutPoint(b"\x01" * 32, 0xFFFFFFFF + 1, check_validity=False)
    with pytest.raises(BTClibValueError, match="invalid vout: "):
        out_point.assert_valid()
    # coinbase-like combinations that are not the canonical coinbase marker
    out_point = OutPoint(b"\x00" * 31 + b"\x01", 0xFFFFFFFF, check_validity=False)
    with pytest.raises(BTClibValueError, match="invalid OutPoint"):
        out_point.assert_valid()
    out_point = OutPoint(b"\x00" * 32, 0, check_validity=False)
    with pytest.raises(BTClibValueError, match="invalid OutPoint"):
        out_point.assert_valid()
| {
"content_hash": "6f4c5495cfd5c0e8af73977836aa60eb",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 82,
"avg_line_length": 36.903614457831324,
"alnum_prop": 0.6793992817499184,
"repo_name": "fametrano/BitcoinBlockchainTechnology",
"id": "c0ee2c1236e11ae1ab87bdf5507bb8c8c22ef897",
"size": "3439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tx/test_out_point.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1048"
},
{
"name": "Python",
"bytes": "254936"
}
],
"symlink_target": ""
} |
"""Noop tool that defines builder functions for non-default platforms to
avoid errors when scanning sconsscripts."""
import SCons.Builder
def generate(env):
    """SCons method."""
    # On non-Windows platforms register do-nothing RES/Grit builders so
    # sconscripts that reference them can still be scanned without errors.
    if not env.Bit('windows'):
        builder = SCons.Builder.Builder(
            action=''
        )
        env.Append(BUILDERS={'RES': builder, 'Grit': builder})
def exists(dummy):
    """SCons tool-existence hook; this no-op tool is always available."""
    return 1
| {
"content_hash": "26bc86cb833e1d5589555f17dc44fada",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 22.6875,
"alnum_prop": 0.6776859504132231,
"repo_name": "snak3ater/android_external_chromium_org_third_party_libjingle_source_talk",
"id": "b184ddc542d82b31fe31588b55043316020cac5d",
"size": "458",
"binary": false,
"copies": "7",
"ref": "refs/heads/kk",
"path": "site_scons/site_tools/talk_noops.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from ivreg.models import Voter
def test_index(app):
    """The landing page renders without error."""
    app.get('/')
def test_registration(app):
    """Submitting a voter id redirects to a ballot page."""
    resp = app.get('/registration/')
    resp.form['voter_id'] = '123'
    resp = resp.form.submit()
    resp = resp.follow()
    assert resp.request.path.startswith('/ballot/')
def test_registration_callback(mocker, app):
    """JSON registration responds with a redirect URL built from the ballot id."""
    # Pin the generated ballot id so the redirect URL is deterministic.
    mocker.patch('ivreg.views.generate_ballot_id', return_value='2EIWLRVNVNCXFHQDOLXQGIHHAI')
    resp = app.post_json('/registration/', {'voter_id': '123'})
    assert resp.json == {'redirect': 'http://localhost:80/ballot/2EIWLRVNVNCXFHQDOLXQGIHHAI/'}
def test_validation_error(app):
    """/validate/ rejects GET, non-JSON POSTs, and unknown ballot ids."""
    resp = app.get('/validate/')
    assert resp.json == {'errors': 'Only POST method allowed.'}
    resp = app.post('/validate/', {'ballot_id': '123'})
    assert resp.json == {'errors': 'Only application/json requests are accepted.'}
    resp = app.post_json('/validate/', {'ballot_id': '123'})
    assert resp.json == {'errors': {'ballot_id': ['Given ballot id does not exist.']}}
def test_validation(app):
    """A known ballot id redirects to the external validation URL for the voter."""
    Voter.objects.create(voter_id='123', ballot_id='2EIWLRVNVNCXFHQDOLXQGIHHAI', candidates='{}')
    resp = app.post_json('/validate/', {'ballot_id': '2EIWLRVNVNCXFHQDOLXQGIHHAI'})
    assert resp.location == 'https://example.com/validate/123/'
def test_ballot(app):
    """An existing ballot page renders without error."""
    Voter.objects.create(ballot_id='2EIWLRVNVNCXFHQDOLXQGIHHAI', candidates='{}')
    app.get('/ballot/2EIWLRVNVNCXFHQDOLXQGIHHAI/')
| {
"content_hash": "a921c8ccc0e8ca6786c53dc02df31459",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 97,
"avg_line_length": 34.8780487804878,
"alnum_prop": 0.6727272727272727,
"repo_name": "00riddle00/internet-voting-authorization",
"id": "0d31a8df2ac349128dfd3bdc11643273affa7d4a",
"size": "1430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_app.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8591"
},
{
"name": "HTML",
"bytes": "19639"
},
{
"name": "Makefile",
"bytes": "89"
},
{
"name": "Python",
"bytes": "13146"
}
],
"symlink_target": ""
} |
"""For all the benchmarks that set options, test that the options are valid."""
from collections import defaultdict
import os
import unittest
from telemetry import benchmark as benchmark_module
from telemetry.core import browser_options
from telemetry.core import discover
from telemetry.unittest_util import progress_reporter
def _GetPerfDir(*subdirs):
perf_dir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(perf_dir, *subdirs)
def _GetAllPerfBenchmarks():
    """Discover and return every Benchmark class under tools/perf/benchmarks."""
    # index_by_class_name=True allows multiple benchmarks per module.
    return discover.DiscoverClasses(
        _GetPerfDir('benchmarks'), _GetPerfDir(), benchmark_module.Benchmark,
        index_by_class_name=True).values()
def _BenchmarkOptionsTestGenerator(benchmark):
    """Return a test method that validates *benchmark*'s command-line options."""
    def testBenchmarkOptions(self):  # pylint: disable=W0613
        """Invalid options will raise benchmark.InvalidOptionsError."""
        options = browser_options.BrowserFinderOptions()
        parser = options.CreateParser()
        benchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        benchmark.SetArgumentDefaults(parser)
    return testBenchmarkOptions
class TestNoBenchmarkNamesDuplication(unittest.TestCase):
    """Asserts that no two discovered benchmarks share the same Name()."""
    def runTest(self):
        all_benchmarks = _GetAllPerfBenchmarks()
        # Group benchmark classes by their reported name.
        names_to_benchmarks = defaultdict(list)
        for b in all_benchmarks:
            names_to_benchmarks[b.Name()].append(b)
        for n in names_to_benchmarks:
            self.assertEquals(1, len(names_to_benchmarks[n]),
                              'Multiple benchmarks with the same name %s are '
                              'found: %s' % (n, str(names_to_benchmarks[n])))
def _AddBenchmarkOptionsTests(suite):
    """Add one options-validation test per benchmark that defines options."""
    # Using |index_by_class_name=True| allows returning multiple benchmarks
    # from a module.
    all_benchmarks = _GetAllPerfBenchmarks()
    for benchmark in all_benchmarks:
        if not benchmark.options:
            # No need to test benchmarks that have not defined options.
            continue
        # A fresh TestCase subclass per benchmark carries one generated
        # test method named after the benchmark.
        class BenchmarkOptionsTest(unittest.TestCase):
            pass
        setattr(BenchmarkOptionsTest, benchmark.Name(),
                _BenchmarkOptionsTestGenerator(benchmark))
        suite.addTest(BenchmarkOptionsTest(benchmark.Name()))
    suite.addTest(TestNoBenchmarkNamesDuplication())
def load_tests(loader, standard_tests, pattern):
  """unittest load_tests protocol hook; builds the whole suite manually."""
  del loader, standard_tests, pattern  # unused
  test_suite = progress_reporter.TestSuite()
  _AddBenchmarkOptionsTests(test_suite)
  return test_suite
| {
"content_hash": "f1582cba81381bea16bf9abb2d021cb9",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 35.87692307692308,
"alnum_prop": 0.7388507718696398,
"repo_name": "Fireblend/chromium-crosswalk",
"id": "571585e7412c98d07af6ab745b00c0c155bf6df3",
"size": "2495",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/perf/benchmarks/benchmark_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "34367"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9397825"
},
{
"name": "C++",
"bytes": "235052525"
},
{
"name": "CSS",
"bytes": "951745"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Emacs Lisp",
"bytes": "988"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "29070071"
},
{
"name": "Java",
"bytes": "10089056"
},
{
"name": "JavaScript",
"bytes": "20170506"
},
{
"name": "Makefile",
"bytes": "68234"
},
{
"name": "Objective-C",
"bytes": "1639405"
},
{
"name": "Objective-C++",
"bytes": "9478782"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "465313"
},
{
"name": "Python",
"bytes": "8146950"
},
{
"name": "Shell",
"bytes": "473684"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
"""Build vocab and cache it so we don't have to keep running."""
import collections
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
flags.DEFINE_string('vocab_file_path', '/tmp/lra_data/aan',
'Path for vocab file output.')
FLAGS = flags.FLAGS
DATASET_PATHS = '/tmp/dataset'
def whitespace_tokenize(text):
  """Returns the whitespace-delimited tokens of *text* (empty list if blank)."""
  stripped = text.strip()
  return stripped.split()
def build_vocab(datasets,
                special_tokens=(b'<pad>', b'<unk>', b'<s>', b'</s>'),
                min_freq=10,
                text_keys=None):
  """Returns a vocabulary of tokens with optional minimum frequency."""
  logging.info('Building Vocab...')
  # Tally token frequencies across all datasets.
  token_counts = collections.Counter()
  num_processed = 0
  for dataset in datasets:
    for example in tfds.as_numpy(dataset):
      # Only the first text key contributes, and only the first 100 bytes
      # of each example's text.
      for key in text_keys[:1]:
        token_counts.update(whitespace_tokenize(example[key][:100]))
      num_processed += 1
      if num_processed % 100 == 0:
        logging.info('Processed %d', num_processed)
  # Special tokens occupy the lowest ids, in the given order.
  vocab = collections.OrderedDict(
      (token, index) for index, token in enumerate(special_tokens))
  # Remaining tokens are appended in sorted order when frequent enough.
  for token in sorted(token_counts):
    if token_counts[token] >= min_freq:
      vocab[token] = len(vocab)
  logging.info('Number of unfiltered tokens: %d', len(token_counts))
  logging.info('Vocabulary size: %d', len(vocab))
  return vocab
def get_tsv_dataset(file_path, batch_size):
  """Builds an unbatched tf.data dataset from a 5-column, headerless TSV."""
  tf.logging.info(file_path)
  # Columns: label, two document ids, and the two raw text fields.
  column_names = ['label', 'id1', 'id2', 'text1', 'text2']
  column_defaults = [tf.string] * 5
  dataset = tf.data.experimental.make_csv_dataset(
      [file_path],
      batch_size,
      column_names=column_names,
      column_defaults=column_defaults,
      use_quote_delim=False,
      field_delim='\t',
      shuffle=False,
      header=False,
      num_epochs=1)
  return dataset.unbatch()
def get_dataset(batch_size):
  """Loads the train split and maps rows to Source1/Source2/Target dicts."""
  train_path = DATASET_PATHS + '.train.tsv'
  train = get_tsv_dataset(train_path, batch_size)
  def adapt_example(example):
    # Rename the matching-task columns to the generic src/src/tgt layout.
    return {
        'Source1': example['text1'],
        'Source2': example['text2'],
        'Target': example['label']
    }
  train = train.map(adapt_example).prefetch(tf.data.experimental.AUTOTUNE)
  return train
def main(argv):
  """Builds a subword vocabulary from the train split and saves it to disk."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  train = get_dataset(1)
  logging.info('Building/loading subword tokenizer')
  corpus = (example['Source1'].numpy() for example in train)
  encoder = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(
      corpus, target_vocab_size=2**13)
  encoder.save_to_file(FLAGS.vocab_file_path)
  logging.info('Saved')
if __name__ == '__main__':
app.run(main)
| {
"content_hash": "e6c21101cc3c0b1e8048be7af50528fc",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 74,
"avg_line_length": 31.486238532110093,
"alnum_prop": 0.592948717948718,
"repo_name": "google-research/long-range-arena",
"id": "38bc47caf2acda48d7a3fdd996ba73cfc1febebf",
"size": "4004",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lra_benchmarks/matching/build_vocab.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "507289"
}
],
"symlink_target": ""
} |
"""Utility class for producing a scansion pattern for a Latin hendecasyllables.
Given a line of hendecasyllables, the scan method performs a series of transformation and checks
are performed and for each one performed successfully, a note is added to the scansion_notes
list so that end users may view the provenance of a scansion.
"""
import re
import cltk.prosody.lat.string_utils as string_utils
from cltk.prosody.lat.metrical_validator import MetricalValidator
from cltk.prosody.lat.scansion_constants import ScansionConstants
from cltk.prosody.lat.scansion_formatter import ScansionFormatter
from cltk.prosody.lat.syllabifier import Syllabifier
from cltk.prosody.lat.verse import Verse
from cltk.prosody.lat.verse_scanner import VerseScanner
__author__ = ["Todd Cook <todd.g.cook@gmail.com>"]
__license__ = "MIT License"
class HendecasyllableScanner(VerseScanner):
    """The scansion symbols used can be configured by passing a suitable constants class to
    the constructor."""
    def __init__(
        self,
        constants=ScansionConstants(),
        syllabifier=Syllabifier(),
        optional_tranform=False,
        *args,
        **kwargs
    ):
        """
        :param constants: table of scansion symbols/character classes.
        :param syllabifier: splitter used to divide words into syllables.
        :param optional_tranform: when True, ``scan`` retries a failed line
            with the optional i-to-j transform.  NOTE(review): the parameter
            name is a typo for "transform", but it is part of the public
            keyword signature, so it is kept.
        NOTE(review): the ``ScansionConstants()`` / ``Syllabifier()`` defaults
        are evaluated once at class-definition time and shared by every
        instance constructed without explicit arguments — harmless only if
        those objects are stateless; confirm.
        """
        super().__init__(*args, **kwargs)
        self.constants = constants
        # NOTE(review): remove_punct_map is not referenced in this class;
        # presumably used by the VerseScanner base class — confirm.
        self.remove_punct_map = string_utils.remove_punctuation_dict()
        self.punctuation_substitutions = string_utils.punctuation_for_spaces_dict()
        self.metrical_validator = MetricalValidator(constants)
        self.formatter = ScansionFormatter(constants)
        self.syllabifier = syllabifier
        # Matches a stressed-unstressed-stressed run (an inverted amphibrach),
        # allowing whitespace between the marks.
        self.inverted_amphibrach_re = re.compile(
            r"{}\s*{}\s*{}".format(
                self.constants.STRESSED,
                self.constants.UNSTRESSED,
                self.constants.STRESSED,
            )
        )
        # Single-character class covering vowels (plain and accented),
        # liquids and mutes.
        self.syllable_matcher = re.compile(
            r"[{}]".format(
                self.constants.VOWELS
                + self.constants.ACCENTED_VOWELS
                + self.constants.LIQUIDS
                + self.constants.MUTES
            )
        )
        self.optional_transform = optional_tranform
    def scan(self, original_line: str, optional_transform: bool = False) -> Verse:
        """
        Scan a line of Latin hendecasyllables and produce a scansion pattern, and other data.
        :param original_line: the original line of Latin verse
        :param optional_transform: whether or not to perform i to j transform for syllabification
        :return: a Verse object
        >>> scanner = HendecasyllableScanner()
        >>> print(scanner.scan("Cui dono lepidum novum libellum"))
        Verse(original='Cui dono lepidum novum libellum', scansion=' - U - U U - U - U - U ', meter='hendecasyllable', valid=True, syllable_count=11, accented='Cui donō lepidūm novūm libēllum', scansion_notes=['Corrected invalid start.'], syllables = ['Cui', 'do', 'no', 'le', 'pi', 'dūm', 'no', 'vūm', 'li', 'bēl', 'lum'])
        >>> print(scanner.scan(
        ... "ārida modo pumice expolitum?").scansion) # doctest: +NORMALIZE_WHITESPACE
        - U - U U - U - U - U
        """
        verse = Verse(original_line, meter="hendecasyllable")
        # replace punctuation with spaces
        line = original_line.translate(self.punctuation_substitutions)
        # conservative i to j
        line = self.transform_i_to_j(line)
        working_line = self.elide_all(line)
        working_line = self.accent_by_position(working_line)
        syllables = self.syllabifier.syllabify(working_line)
        if optional_transform:
            # Redo the preprocessing pipeline on the aggressively-transformed
            # line; note a scansion note is recorded for provenance.
            working_line = self.transform_i_to_j_optional(line)
            working_line = self.elide_all(working_line)
            working_line = self.accent_by_position(working_line)
            syllables = self.syllabifier.syllabify(working_line)
            verse.scansion_notes += [self.constants.NOTE_MAP["optional i to j"]]
        verse.working_line = working_line
        verse.syllable_count = self.syllabifier.get_syllable_count(syllables)
        verse.syllables = syllables
        # identify some obvious and probably choices based on number of syllables
        if verse.syllable_count > 11:
            verse.valid = False
            verse.scansion_notes += [self.constants.NOTE_MAP["> 11"]]
            return verse
        if verse.syllable_count < 11:
            verse.valid = False
            verse.scansion_notes += [self.constants.NOTE_MAP["< 11"]]
            return verse
        stresses = self.flag_dipthongs(syllables)
        syllables_wspaces = string_utils.to_syllables_with_trailing_spaces(
            working_line, syllables
        )
        offset_map = self.calc_offset(syllables_wspaces)
        # Any syllable containing an accented vowel is marked stressed.
        for idx, syl in enumerate(syllables):
            for accented in self.constants.ACCENTED_VOWELS:
                if accented in syl:
                    stresses.append(idx)
        # second to last syllable is always long
        stresses.append(verse.syllable_count - 2)
        verse.scansion = self.produce_scansion(stresses, syllables_wspaces, offset_map)
        if len(
            string_utils.stress_positions(self.constants.STRESSED, verse.scansion)
        ) != len(set(stresses)):
            verse.valid = False
            verse.scansion_notes += [self.constants.NOTE_MAP["invalid syllables"]]
            return verse
        if self.metrical_validator.is_valid_hendecasyllables(verse.scansion):
            verse.scansion_notes += [self.constants.NOTE_MAP["positionally"]]
            return self.assign_candidate(verse, verse.scansion)
        # Repair heuristics: each smoothing pass records a note and re-checks
        # validity before the next, stronger correction is attempted.
        smoothed = self.correct_invalid_start(verse.scansion)
        if verse.scansion != smoothed:
            verse.scansion_notes += [self.constants.NOTE_MAP["invalid start"]]
            verse.scansion = smoothed
            stresses += string_utils.differences(verse.scansion, smoothed)
            if self.metrical_validator.is_valid_hendecasyllables(verse.scansion):
                return self.assign_candidate(verse, verse.scansion)
        smoothed = self.correct_antepenult_chain(verse.scansion)
        if verse.scansion != smoothed:
            verse.scansion_notes += [self.constants.NOTE_MAP["antepenult chain"]]
            verse.scansion = smoothed
            stresses += string_utils.differences(verse.scansion, smoothed)
            if self.metrical_validator.is_valid_hendecasyllables(verse.scansion):
                return self.assign_candidate(verse, verse.scansion)
        # Fall back to the closest known valid pattern, but only when exactly
        # one candidate differs from the current scansion in a single position.
        candidates = self.metrical_validator.closest_hendecasyllable_patterns(
            verse.scansion
        )
        if candidates is not None:
            if (
                len(candidates) == 1
                and len(verse.scansion.replace(" ", "")) == len(candidates[0])
                and len(string_utils.differences(verse.scansion, candidates[0])) == 1
            ):
                tmp_scansion = self.produce_scansion(
                    string_utils.differences(verse.scansion, candidates[0]),
                    syllables_wspaces,
                    offset_map,
                )
                if self.metrical_validator.is_valid_hendecasyllables(tmp_scansion):
                    verse.scansion_notes += [self.constants.NOTE_MAP["closest match"]]
                    return self.assign_candidate(verse, tmp_scansion)
        # if the line doesn't scan "as is", if may scan if the optional i to j transformations
        # are made, so here we set them and try again.
        if self.optional_transform and not verse.valid:
            return self.scan(original_line, optional_transform=True)
        verse.accented = self.formatter.merge_line_scansion(
            verse.original, verse.scansion
        )
        return verse
    def correct_invalid_start(self, scansion: str) -> str:
        """
        The third syllable of a hendecasyllabic line is long, so we will convert it.
        :param scansion: scansion string
        :return: scansion string with corrected start
        >>> print(HendecasyllableScanner().correct_invalid_start(
        ... "- U U U U - U - U - U").strip())
        - U - U U - U - U - U
        """
        # mark_list maps compact symbol indices back to their column positions
        # in the spaced scansion string.
        mark_list = string_utils.mark_list(scansion)
        vals = list(scansion.replace(" ", ""))
        # Force the third symbol (index 2) to be long.
        corrected = vals[:2] + [self.constants.STRESSED] + vals[3:]
        new_line = list(" " * len(scansion))
        for idx, car in enumerate(corrected):
            new_line[mark_list[idx]] = car
        return "".join(new_line)
    def correct_antepenult_chain(self, scansion: str) -> str:
        """
        For hendecasyllables the last three feet of the verse are predictable
        and do not regularly allow substitutions.
        :param scansion: scansion line thus far
        :return: corrected line of scansion
        >>> print(HendecasyllableScanner().correct_antepenult_chain(
        ... "-U -UU UU UU UX").strip())
        -U -UU -U -U -X
        """
        mark_list = string_utils.mark_list(scansion)
        vals = list(scansion.replace(" ", ""))
        # Rewrite the six symbols before the final one as trochee-trochee-long,
        # keeping the (anceps) final symbol untouched.
        new_vals = (
            vals[: len(vals) - 6]
            + [
                self.constants.TROCHEE
                + self.constants.TROCHEE
                + self.constants.STRESSED
            ]
            + vals[-1:]
        )
        corrected = "".join(new_vals)
        new_line = list(" " * len(scansion))
        for idx, car in enumerate(corrected):
            new_line[mark_list[idx]] = car
        return "".join(new_line)
| {
"content_hash": "8afde8f14f35b819fc98067352b05943",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 331,
"avg_line_length": 43.24311926605505,
"alnum_prop": 0.6169513100668294,
"repo_name": "cltk/cltk",
"id": "9f503b4fb37a88276abf3990641b8ce16afe7643",
"size": "9435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/cltk/prosody/lat/hendecasyllable_scanner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "120521"
},
{
"name": "Makefile",
"bytes": "2633"
},
{
"name": "Python",
"bytes": "3336083"
}
],
"symlink_target": ""
} |
"""Unit tests for test_env.py functionality.
Each unit test is launches python process that uses test_env.py
to launch another python process. Then signal handling and
propagation is tested. This similates how Swarming uses test_env.py.
"""
import os
import signal
import subprocess
import sys
import time
import unittest
TEST_SCRIPT = 'test_env_user_script.py'
def launch_process_windows(args):
  """Starts TEST_SCRIPT in a new process group so CTRL events can target it."""
  command = [sys.executable, TEST_SCRIPT] + args
  return subprocess.Popen(
      command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
      env=os.environ.copy(),
      creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
def launch_process_nonwindows(args):
  """Starts TEST_SCRIPT as a plain child process (POSIX platforms)."""
  command = [sys.executable, TEST_SCRIPT] + args
  return subprocess.Popen(
      command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
      env=os.environ.copy())
def read_subprocess_message(proc, starts_with):
  """Returns the remainder of the first stdout line carrying the prefix.

  Returns None if no line of proc.stdout starts with |starts_with|.
  """
  for output_line in proc.stdout:
    if not output_line.startswith(starts_with):
      continue
    return output_line.rstrip().replace(starts_with, '')
  return None
def send_and_wait(proc, sig, sleep_time=0.3):
  """Sends a signal to subprocess and reaps it."""
  # Give the child time to launch before signaling it.
  time.sleep(sleep_time)
  os.kill(proc.pid, sig)
  proc.wait()
class SignalingWindowsTest(unittest.TestCase):
  def setUp(self):
    super(SignalingWindowsTest, self).setUp()
    if sys.platform != 'win32':
      self.skipTest('test only runs on Windows')
  def test_send_ctrl_break_event(self):
    child = launch_process_windows([])
    send_and_wait(child, signal.CTRL_BREAK_EVENT)
    received = read_subprocess_message(child, 'Signal :')
    self.assertEqual(received, str(signal.SIGBREAK))
class SignalingNonWindowsTest(unittest.TestCase):
  def setUp(self):
    super(SignalingNonWindowsTest, self).setUp()
    if sys.platform == 'win32':
      self.skipTest('test does not run on Windows')
  def test_send_sigterm(self):
    child = launch_process_nonwindows([])
    send_and_wait(child, signal.SIGTERM)
    received = read_subprocess_message(child, 'Signal :')
    self.assertEqual(received, str(signal.SIGTERM))
  def test_send_sigint(self):
    child = launch_process_nonwindows([])
    send_and_wait(child, signal.SIGINT)
    received = read_subprocess_message(child, 'Signal :')
    self.assertEqual(received, str(signal.SIGINT))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "0eb83ac539018002d053e4e81044b2a4",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 68,
"avg_line_length": 28.775,
"alnum_prop": 0.7089487402258905,
"repo_name": "endlessm/chromium-browser",
"id": "76d5766debce418275e64db818c3c968fd5d7d91",
"size": "2491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_env_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
__author__ = 'lorenzo'
from config.config import _REST_SERVICE, _SERVICE
def format_message(exception, root=_REST_SERVICE):
    """Return an error payload (dict rendered as a string).

    The special root 'hydra' is expanded into the full hydra service URL.
    """
    if root == 'hydra':
        root = _SERVICE + '/hydra/'
    payload = {"error": 1, "exception": exception, "back": root}
    return str(payload)
| {
"content_hash": "ec84683e152bd50a68055d4ec590c7e7",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 66,
"avg_line_length": 28.22222222222222,
"alnum_prop": 0.6338582677165354,
"repo_name": "pincopallino93/rdfendpoints",
"id": "a640687ff46e9db6bc42fba5ec40023148de4d86",
"size": "254",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flankers/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1044"
},
{
"name": "HTML",
"bytes": "13039"
},
{
"name": "JavaScript",
"bytes": "4028"
},
{
"name": "Python",
"bytes": "1364702"
}
],
"symlink_target": ""
} |
"""Ptransform overrides for DataflowRunner."""
from apache_beam.coders import typecoders
from apache_beam.pipeline import PTransformOverride
class CreatePTransformOverride(PTransformOverride):
  """A ``PTransformOverride`` for ``Create`` in streaming mode."""
  def matches(self, applied_ptransform):
    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam import Create
    from apache_beam.options.pipeline_options import StandardOptions
    # Only Create transforms in streaming pipelines are overridden.
    if not isinstance(applied_ptransform.transform, Create):
      return False
    pipeline = applied_ptransform.outputs[None].pipeline
    standard_options = pipeline._options.view_as(StandardOptions)
    return standard_options.streaming
  def get_replacement_transform(self, ptransform):
    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.runners.dataflow.native_io.streaming_create import \
      StreamingCreate
    output_coder = typecoders.registry.get_coder(ptransform.get_output_type())
    return StreamingCreate(ptransform.value, output_coder)
| {
"content_hash": "36d2386548edc727a39487fa46f0d996",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 39.483870967741936,
"alnum_prop": 0.7124183006535948,
"repo_name": "tgroh/incubator-beam",
"id": "0ce212fa31bdd79d1a285448d64444cb1d6075bc",
"size": "2009",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/dataflow/ptransform_overrides.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22449"
},
{
"name": "Java",
"bytes": "9720078"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
"""AMPAL objects that represent ligands."""
from ampal.base_ampal import Polymer, Monomer
class LigandGroup(Polymer):
    """A container for `Ligand` `Monomers`.
    Parameters
    ----------
    monomers : Monomer or [Monomer], optional
        Monomer or list containing Monomer objects to form the Polymer().
    polymer_id : str, optional
        An ID that the user can use to identify the `Polymer`. This is
        used when generating a pdb file using `Polymer().pdb`.
    ampal_parent : ampal.Assembly, optional
        Reference to `Assembly` containing the `Polymer`.
    sl : int, optional
        The default smoothing level used when calculating the
        backbone primitive.
    """
    def __init__(self, monomers=None, polymer_id=' ', ampal_parent=None, sl=2):
        super().__init__(
            monomers=monomers, polymer_id=polymer_id, molecule_type='ligands',
            ampal_parent=ampal_parent, sl=sl)
    def __repr__(self):
        ligand_count = len(self._monomers)
        noun = 'Ligand' if ligand_count == 1 else 'Ligands'
        return '<Ligands chain containing {} {}>'.format(ligand_count, noun)
    @property
    def categories(self):
        """Returns the categories of `Ligands` in `LigandGroup`."""
        by_category = {}
        for ligand in self:
            by_category.setdefault(ligand.category, []).append(ligand)
        return by_category
    @property
    def category_count(self):
        """Returns the number of categories in `categories`."""
        return {category: len(ligands)
                for category, ligands in self.categories.items()}
class Ligand(Monomer):
    """`Monomer` that represents a `Ligand`.
    Notes
    -----
    All `Monomers` that do not have dedicated classes are
    represented using the `Ligand` class.
    Parameters
    ----------
    mol_code : str
        PDB molecule code that represents the monomer.
    atoms : OrderedDict, optional
        OrderedDict containing Atoms for the Monomer, preserving
        insertion order.
    monomer_id : str, optional
        String used to identify the residue.
    insertion_code : str, optional
        Insertion code of monomer, used if reading from pdb.
    is_hetero : bool, optional
        True if is a hetero atom in pdb. Helps with PDB formatting.
    ampal_parent : Polymer or None, optional
        A reference to the `LigandGroup` containing this `Ligand`.
    Attributes
    ----------
    atoms : OrderedDict
        OrderedDict containing Atoms for the Monomer, preserving
        insertion order.
    mol_code : str
        PDB molecule code that represents the `Ligand`.
    insertion_code : str
        Insertion code of `Ligand`, used if reading from pdb.
    is_hetero : bool
        True if is a hetero atom in pdb. Helps with PDB formatting.
    states : dict
        Per-state `OrderedDicts` of atom information for the `Ligand`.
    id : str
        String used to identify the residue.
    ampal_parent : Polymer or None
        A reference to the `LigandGroup` containing this `Ligand`.
    tags : dict
        A dictionary of information about this AMPAL object, used both
        by AMPAL itself for caching and by users for their own data.
    """
    def __init__(self, mol_code, atoms=None, monomer_id=' ', insertion_code=' ',
                 is_hetero=False, ampal_parent=None):
        super().__init__(atoms, monomer_id, ampal_parent=ampal_parent)
        self.mol_code = mol_code
        self.insertion_code = insertion_code
        self.is_hetero = is_hetero
    def __repr__(self):
        atom_count = len(self.atoms)
        noun = 'Atom' if atom_count == 1 else 'Atoms'
        return '<Ligand containing {} {}. Ligand code: {}>'.format(
            atom_count, noun, self.mol_code)
__author__ = "Christopher W. Wood, Kieran L. Hudson"
| {
"content_hash": "ca517c64f6384ffaac06c7762217d41e",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 80,
"avg_line_length": 35.78448275862069,
"alnum_prop": 0.6270778125752831,
"repo_name": "woolfson-group/isambard",
"id": "ee254dc22603cca8327fe9173c5de25880cc1911",
"size": "4151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isambard/ampal/ligands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5446"
},
{
"name": "Python",
"bytes": "657368"
},
{
"name": "Shell",
"bytes": "522"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils import timezone
# Category
class Category(models.Model):
    """Content category; forms a hierarchy via the self-referencing ``parent``."""
    name = models.TextField()
    # NOTE(review): presumably a search-normalized copy of ``name`` — confirm.
    name_search = models.CharField(max_length=512)
    alias = models.CharField(max_length=100)
    description = models.TextField()
    parent = models.ForeignKey('self', null=True, default=None)  # None for root nodes
    level = models.IntegerField()  # NOTE(review): looks like nesting depth — confirm
    order = models.IntegerField()  # manual sort key; see Meta.ordering
    path = models.CharField(max_length=512, null=True)
    state = models.BooleanField(default=True)
    # Audit fields: creation / last-modification timestamps and usernames.
    created = models.DateTimeField()
    created_by = models.CharField(max_length=100, null=True, default=None)
    modified = models.DateTimeField()
    modified_by = models.CharField(max_length=100, null=True, default=None)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ['order']  # default queryset ordering by the manual sort key
# Doc Type
class DocType(models.Model):
    """Document type dictionary entry (referenced by ``Document.doc_type``)."""
    name = models.TextField()
    # NOTE(review): presumably a search-normalized copy of ``name`` — confirm.
    name_search = models.CharField(max_length=512)
    alias = models.CharField(max_length=100)
    state = models.BooleanField(default=True)
    # Audit fields: creation / last-modification timestamps and usernames.
    created = models.DateTimeField()
    created_by = models.CharField(max_length=100, null=True, default=None)
    modified = models.DateTimeField()
    modified_by = models.CharField(max_length=100, null=True, default=None)
    def __str__(self):
        return self.alias
    class Meta:
        ordering = ['alias']
# Language
class Language(models.Model):
    """Language dictionary entry (referenced by ``Article``/``Document``)."""
    name = models.TextField()
    # NOTE(review): presumably a search-normalized copy of ``name`` — confirm.
    name_search = models.CharField(max_length=512)
    alias = models.CharField(max_length=100)
    state = models.BooleanField(default=True)
    # Audit fields: creation / last-modification timestamps and usernames.
    created = models.DateTimeField()
    created_by = models.CharField(max_length=100, null=True, default=None)
    modified = models.DateTimeField()
    modified_by = models.CharField(max_length=100, null=True, default=None)
    def __str__(self):
        return self.alias
    class Meta:
        ordering = ['alias']
# License
class License(models.Model):
    """License dictionary entry (referenced by ``Article``/``Document``)."""
    name = models.TextField()
    # NOTE(review): presumably a search-normalized copy of ``name`` — confirm.
    name_search = models.CharField(max_length=512)
    alias = models.CharField(max_length=100)
    description = models.TextField()  # Description
    state = models.BooleanField(default=True)
    # Audit fields: creation / last-modification timestamps and usernames.
    created = models.DateTimeField()
    created_by = models.CharField(max_length=100, null=True, default=None)
    modified = models.DateTimeField()
    modified_by = models.CharField(max_length=100, null=True, default=None)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ['alias']
# Article
class Article(models.Model):
    """Article with intro/body text, categorisation, and publication metadata.

    Field comments below are translated from the original Russian.
    """
    title = models.TextField()  # Title
    title_search = models.CharField(max_length=512)
    alias = models.CharField(max_length=100, null=True)  # Alias (internal field)
    patch = models.CharField(max_length=512, null=True)  # Path
    thumb_src = models.CharField(max_length=512, null=True)  # Link to the preview image
    intro = models.TextField()  # Introductory text
    content = models.TextField()  # Article body text
    description = models.TextField()  # Description
    category = models.ForeignKey(Category, null=True, default=None)  # Category
    language = models.ForeignKey(Language, null=True, default=None)  # Language
    license = models.ForeignKey(License, null=True, default=None)  # License
    copyright = models.CharField(max_length=512, null=True)  # Copyright
    source = models.CharField(max_length=512, null=True)  # Source
    source_url = models.CharField(max_length=512, null=True)  # Link to the source
    state = models.BooleanField(default=True)  # Status (whether the article is published)
    on_main = models.BooleanField(default=False)  # Whether the article is on the main page
    # Audit fields: who/when created, modified and published.
    created = models.DateTimeField()
    created_by = models.CharField(max_length=100, null=True, default=None)
    modified = models.DateTimeField()
    modified_by = models.CharField(max_length=100, null=True, default=None)
    published = models.DateTimeField(null=True, default=None)
    published_by = models.CharField(max_length=100, null=True, default=None)
    # NOTE(review): presumably the publication time window — confirm.
    pub_from = models.DateTimeField(null=True, default=None)
    pub_to = models.DateTimeField(null=True, default=None)
    def __str__(self):
        return self.title
    class Meta:
        ordering = ['-created']  # newest first
# Document
class Document(models.Model):
    """Stored document (file) with categorisation and publication metadata.

    Field comments below are translated from the original Russian.
    """
    title = models.TextField()  # Title
    title_search = models.CharField(max_length=512)
    alias = models.CharField(max_length=100, null=True)  # Alias (internal field)
    patch = models.CharField(max_length=512, null=True)  # Path
    src = models.CharField(max_length=512, null=True)  # Link to the file location
    thumb_src = models.CharField(max_length=512, null=True)  # Link to the preview image
    doc_type = models.ForeignKey(DocType, null=True, default=None)  # Document type
    description = models.TextField()  # Description
    category = models.ForeignKey(Category, null=True, default=None)  # Category
    language = models.ForeignKey(Language, null=True, default=None)  # Language
    license = models.ForeignKey(License, null=True, default=None)  # License
    copyright = models.CharField(max_length=512, null=True)  # Copyright
    source = models.CharField(max_length=512, null=True)  # Source
    source_url = models.CharField(max_length=512, null=True)  # Link to the source
    state = models.BooleanField(default=True)
    # Audit fields: who/when created and published.
    created = models.DateTimeField()
    created_by = models.CharField(max_length=100, null=True, default=None)
    published = models.DateTimeField(null=True, default=None)
    published_by = models.CharField(max_length=100, null=True, default=None)
    # NOTE(review): presumably the publication time window — confirm.
    pub_from = models.DateTimeField(null=True, default=None)
    pub_to = models.DateTimeField(null=True, default=None)
    # NOTE(review): presumably the MD5 digest of the file contents — confirm.
    hash_md5 = models.CharField(max_length=100, null=True, default=None)
    def __str__(self):
        return self.title
    class Meta:
        ordering = ['-created']  # newest first
# Document Collection
class DocumentCollection(models.Model):
    """Named collection grouping articles and documents via M2M links."""
    title = models.TextField()  # Title
    title_search = models.CharField(max_length=512)
    alias = models.CharField(max_length=100, null=True)
    state = models.BooleanField(default=True)
    # Audit fields: creation / last-modification timestamps and usernames.
    created = models.DateTimeField()
    created_by = models.CharField(max_length=100, null=True, default=None)
    modified = models.DateTimeField()
    modified_by = models.CharField(max_length=100, null=True, default=None)
    # Collection members; explicit join-table names are specified.
    to_article = models.ManyToManyField(
        Article, db_table='anodos_collection_to_article')
    to_document = models.ManyToManyField(
        Document, db_table='anodos_collection_to_document')
    def __str__(self):
        return self.title
    class Meta:
        ordering = ['-created']  # newest first
# Log manager
class LogManager(models.Manager):
    """Manager that creates, echoes and persists ``Log`` entries."""
    def add(self, subject, channel, title, description):
        """Create and save a Log row, truncating short fields to 100 chars."""
        entry = Log(
            subject=subject[:100],
            channel=channel[:100],
            title=title[:100],
            description=description,
            created=timezone.now())
        print(entry)  # echo to stdout, as in the original implementation
        entry.save()
        return entry
# Log
class Log(models.Model):
    """Application log entry; rows are created via ``Log.objects.add()``."""
    subject = models.CharField(max_length=100)
    channel = models.CharField(max_length=100)
    title = models.CharField(max_length=100)
    description = models.TextField()
    created = models.DateTimeField()
    objects = LogManager()  # custom manager providing add()
    def __str__(self):
        return "{} | {} | {} | {}".format(self.subject, self.channel, self.title, self.description)
    class Meta:
        ordering = ['-created']  # newest first
| {
"content_hash": "c9dab47eeff74090b4cc3b7da99c5c12",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 116,
"avg_line_length": 39.77570093457944,
"alnum_prop": 0.6018562030075187,
"repo_name": "abezpalov/anodos",
"id": "0b838cd8bd022a8d7cd5c56a1679f6330dfc928d",
"size": "8881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "130296"
},
{
"name": "HTML",
"bytes": "2595"
},
{
"name": "Python",
"bytes": "3833"
}
],
"symlink_target": ""
} |
import __future__
import os
import unittest
import distutils.dir_util
import tempfile
from test import test_support
try: set
except NameError: from sets import Set as set
import modulefinder
# Note: To test modulefinder with Python 2.2, sets.py and
# modulefinder.py must be available - they are not in the standard
# library.
TEST_DIR = tempfile.mkdtemp()
TEST_PATH = [TEST_DIR, os.path.dirname(__future__.__file__)]
# Each test description is a list of 5 items:
#
# 1. a module name that will be imported by modulefinder
# 2. a list of module names that modulefinder is required to find
# 3. a list of module names that modulefinder should complain
# about because they are not found
# 4. a list of module names that modulefinder should complain
# about because they MAY be not found
# 5. a string specifying packages to create; the format is obvious imo.
#
# Each package will be created in TEST_DIR, and TEST_DIR will be
# removed after the tests again.
# Modulefinder searches in a path that contains TEST_DIR, plus
# the standard Lib directory.
maybe_test = [
"a.module",
["a", "a.module", "sys",
"b"],
["c"], ["b.something"],
"""\
a/__init__.py
a/module.py
from b import something
from c import something
b/__init__.py
from sys import *
"""]
maybe_test_new = [
"a.module",
["a", "a.module", "sys",
"b", "__future__"],
["c"], ["b.something"],
"""\
a/__init__.py
a/module.py
from b import something
from c import something
b/__init__.py
from __future__ import absolute_import
from sys import *
"""]
package_test = [
"a.module",
["a", "a.b", "a.c", "a.module", "mymodule", "sys"],
["blahblah"], [],
"""\
mymodule.py
a/__init__.py
import blahblah
from a import b
import c
a/module.py
import sys
from a import b as x
from a.c import sillyname
a/b.py
a/c.py
from a.module import x
import mymodule as sillyname
from sys import version_info
"""]
absolute_import_test = [
"a.module",
["a", "a.module",
"b", "b.x", "b.y", "b.z",
"__future__", "sys", "exceptions"],
["blahblah"], [],
"""\
mymodule.py
a/__init__.py
a/module.py
from __future__ import absolute_import
import sys # sys
import blahblah # fails
import exceptions # exceptions
import b.x # b.x
from b import y # b.y
from b.z import * # b.z.*
a/exceptions.py
a/sys.py
import mymodule
a/b/__init__.py
a/b/x.py
a/b/y.py
a/b/z.py
b/__init__.py
import z
b/unused.py
b/x.py
b/y.py
b/z.py
"""]
relative_import_test = [
"a.module",
["__future__",
"a", "a.module",
"a.b", "a.b.y", "a.b.z",
"a.b.c", "a.b.c.moduleC",
"a.b.c.d", "a.b.c.e",
"a.b.x",
"exceptions"],
[], [],
"""\
mymodule.py
a/__init__.py
from .b import y, z # a.b.y, a.b.z
a/module.py
from __future__ import absolute_import # __future__
import exceptions # exceptions
a/exceptions.py
a/sys.py
a/b/__init__.py
from ..b import x # a.b.x
#from a.b.c import moduleC
from .c import moduleC # a.b.moduleC
a/b/x.py
a/b/y.py
a/b/z.py
a/b/g.py
a/b/c/__init__.py
from ..c import e # a.b.c.e
a/b/c/moduleC.py
from ..c import d # a.b.c.d
a/b/c/d.py
a/b/c/e.py
a/b/c/x.py
"""]
relative_import_test_2 = [
"a.module",
["a", "a.module",
"a.sys",
"a.b", "a.b.y", "a.b.z",
"a.b.c", "a.b.c.d",
"a.b.c.e",
"a.b.c.moduleC",
"a.b.c.f",
"a.b.x",
"a.another"],
[], [],
"""\
mymodule.py
a/__init__.py
from . import sys # a.sys
a/another.py
a/module.py
from .b import y, z # a.b.y, a.b.z
a/exceptions.py
a/sys.py
a/b/__init__.py
from .c import moduleC # a.b.c.moduleC
from .c import d # a.b.c.d
a/b/x.py
a/b/y.py
a/b/z.py
a/b/c/__init__.py
from . import e # a.b.c.e
a/b/c/moduleC.py
#
from . import f # a.b.c.f
from .. import x # a.b.x
from ... import another # a.another
a/b/c/d.py
a/b/c/e.py
a/b/c/f.py
"""]
relative_import_test_3 = [
"a.module",
["a", "a.module"],
["a.bar"],
[],
"""\
a/__init__.py
def foo(): pass
a/module.py
from . import foo
from . import bar
"""]
def open_file(path):
    """Create *path*'s parent directory if needed and open it for writing."""
    distutils.dir_util.mkpath(os.path.dirname(path))
    return open(path, "w")
def create_package(source):
    """Materialize the package tree described by *source* under TEST_DIR.

    Non-indented lines name a file to create; indented lines are written
    (stripped) into the most recently named file.
    """
    current = None
    try:
        for raw in source.splitlines():
            if raw.startswith((" ", "\t")):
                # Content line: belongs to the file opened last.
                current.write(raw.strip() + "\n")
            else:
                # New file: close the previous one first.
                if current:
                    current.close()
                current = open_file(os.path.join(TEST_DIR, raw.strip()))
    finally:
        if current:
            current.close()
class ModuleFinderTest(unittest.TestCase):
    """Run ModuleFinder over generated package trees and verify its report."""
    def _do_test(self, info, report=False):
        """Build the package described by *info*, run modulefinder over it,
        and compare the found/missing/maybe-missing module sets with the
        expectations (see the fixture-format comment near the top of file).
        """
        import_this, modules, missing, maybe_missing, source = info
        create_package(source)
        try:
            mf = modulefinder.ModuleFinder(path=TEST_PATH)
            mf.import_hook(import_this)
            if report:
                mf.report()
            ## # This wouldn't work in general when executed several times:
            ## opath = sys.path[:]
            ## sys.path = TEST_PATH
            ## try:
            ##     __import__(import_this)
            ## except:
            ##     import traceback; traceback.print_exc()
            ## sys.path = opath
            ## return
            modules = set(modules)
            found = set(mf.modules.keys())
            more = list(found - modules)
            less = list(modules - found)
            # check if we found what we expected, not more, not less
            self.assertEqual((more, less), ([], []))
            # check for missing and maybe missing modules
            bad, maybe = mf.any_missing_maybe()
            self.assertEqual(bad, missing)
            self.assertEqual(maybe, maybe_missing)
        finally:
            # Always drop the generated tree so the test can run repeatedly.
            distutils.dir_util.remove_tree(TEST_DIR)
    def test_package(self):
        self._do_test(package_test)
    def test_maybe(self):
        self._do_test(maybe_test)
    # Only define the absolute/relative-import tests when this Python
    # actually supports ``from __future__ import absolute_import``.
    if getattr(__future__, "absolute_import", None):
        def test_maybe_new(self):
            self._do_test(maybe_test_new)
        def test_absolute_imports(self):
            self._do_test(absolute_import_test)
        def test_relative_imports(self):
            self._do_test(relative_import_test)
        def test_relative_imports_2(self):
            self._do_test(relative_import_test_2)
        def test_relative_imports_3(self):
            self._do_test(relative_import_test_3)
    def test_extended_opargs(self):
        # A module literal with 2**16 constants — large enough to require
        # extended opargs in the bytecode (hence the test name).
        extended_opargs_test = [
            "a",
            ["a", "b"],
            [], [],
            """\
a.py
                                %r
                                import b
b.py
""" % range(2**16)] # 2**16 constants
        self._do_test(extended_opargs_test)
def test_main():
    # Quiet the distutils messages emitted by mkpath/remove_tree fixtures.
    distutils.log.set_threshold(distutils.log.WARN)
    test_support.run_unittest(ModuleFinderTest)
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "c6cd4fc47e51483d529c665b9945a99c",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 83,
"avg_line_length": 27.99665551839465,
"alnum_prop": 0.47186716043483456,
"repo_name": "IronLanguages/ironpython2",
"id": "497ee5c68ce8f2f65cf882273c7db8b8afb34cc2",
"size": "8371",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "Src/StdLib/Lib/test/test_modulefinder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4080"
},
{
"name": "C",
"bytes": "20290"
},
{
"name": "C#",
"bytes": "12424667"
},
{
"name": "C++",
"bytes": "69156"
},
{
"name": "Classic ASP",
"bytes": "2117"
},
{
"name": "HTML",
"bytes": "13181412"
},
{
"name": "JavaScript",
"bytes": "1656"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "67035"
},
{
"name": "Python",
"bytes": "27860071"
},
{
"name": "Roff",
"bytes": "21"
},
{
"name": "Shell",
"bytes": "193"
},
{
"name": "Smalltalk",
"bytes": "3"
},
{
"name": "VBScript",
"bytes": "974"
},
{
"name": "XSLT",
"bytes": "2058"
}
],
"symlink_target": ""
} |
import os
from flask import Flask, render_template, send_file, request, after_this_request, redirect, url_for, safe_join
import re
from config import MAX_FOLDER_DL_SIZE_BYTES, IGNORE_FILES, ROOT_PATHS
# Module-level WSGI application; the route decorators below attach to it.
app = Flask(__name__)
def get_size(start_path):
    """Return the total size in bytes of every file under *start_path*."""
    return sum(
        os.path.getsize(os.path.join(dirpath, name))
        for dirpath, _dirnames, filenames in os.walk(start_path)
        for name in filenames
    )
import zipfile
def zipdir(path, ziph):
    """Recursively add every file under *path* to the open ZipFile *ziph*.

    Arcnames are computed relative to *path* so the archive does not embed
    the on-disk prefix.  BUG FIX: the previous ``root.replace(path, '')``
    replaced *every* occurrence of the prefix inside the walked paths
    (e.g. path ``data`` mangled ``data/mydata``); ``os.path.relpath``
    yields the correct relative arcname in all cases.
    """
    for root, _dirs, files in os.walk(path):
        for name in files:
            abs_path = os.path.join(root, name)
            ziph.write(abs_path, arcname=os.path.relpath(abs_path, path))
@app.route('/')
def index():
    # Landing page: render the configured root paths for browsing.
    return render_template('index.html', items=ROOT_PATHS)
@app.route('/<int:id>/<path:path>')
@app.route('/<int:id>/')
def browse(id, path=''):
    """Serve a file, a zipped-folder download, or a directory listing.

    *id* indexes into ``ROOT_PATHS``; *path* is the path below that root.
    A ``?download=...`` query argument forces attachment download and, for
    directories, zips the folder on the fly (bounded by
    ``MAX_FOLDER_DL_SIZE_BYTES``).
    """
    # Defense in depth: strip traversal sequences even though safe_join
    # below already rejects paths escaping the root.
    path = path.replace('../', '')
    real_path = safe_join(ROOT_PATHS[id].path, path)
    items = {
        'dirs': [],
        'files': [],
    }
    if os.path.isfile(real_path):
        # If it's a file, send it (as attachment when requested).
        return send_file(real_path,
                         as_attachment=request.args.get('download'))
    if request.args.get('download'):
        folder_size = get_size(real_path)
        if folder_size > MAX_FOLDER_DL_SIZE_BYTES:
            print("TOO LARGE YO")
            # BUG FIX: the old literal used a stray quote plus a backslash
            # line-continuation inside the string, which embedded literal
            # quotes and indentation padding into the response body.
            return "Folder too large. Exceeds maximum dl of {} bytes".format(
                MAX_FOLDER_DL_SIZE_BYTES)
        print("Request for DL")
        zipfilename = 'static/zips/{}.zip'.format(
            os.path.basename(os.path.dirname(real_path))
        )
        # Context manager guarantees the archive is finalized/closed even
        # if zipdir raises (the old code leaked the handle on error).
        with zipfile.ZipFile(zipfilename, 'w') as zipf:
            zipdir(real_path, zipf)
        @after_this_request
        def after(r):
            # Remove the temporary archive once the response has been sent.
            os.unlink(zipfilename)
            print("Done!")
            return r
        return send_file(zipfilename,
                         attachment_filename=os.path.basename(os.path.dirname(real_path)))
    # Directory listing: split entries into dirs and files, honoring the
    # IGNORE_FILES pattern from config.  (Unreachable "DL"/"lel" returns
    # from the original were removed.)
    for f in os.listdir(real_path):
        if not re.match(IGNORE_FILES, f):
            if os.path.isdir(os.path.join(real_path, f)):
                items['dirs'].append((f, os.path.join(path, f) + '/'))
            else:
                items['files'].append((f, os.path.join(path, f)))
    return render_template('browse.html', id=id, items=items)
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == 'meinheld':
        # Serve through the meinheld event loop when asked for explicitly.
        from meinheld import server
        server.listen(("0.0.0.0", 8080))
        server.run(app)
    else:
        # Default: Flask's built-in development server with debug enabled.
        app.debug = True
        app.run(host="0.0.0.0", port=8080)
| {
"content_hash": "a6cf66712710e8368ac5d3926aa5f9bd",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 110,
"avg_line_length": 31.231578947368423,
"alnum_prop": 0.5244354566902595,
"repo_name": "nickw444/MediaBrowser",
"id": "9e5c2511a9b537220149fabc7105e5dd16ee77c5",
"size": "2967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mediabrowser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "119468"
},
{
"name": "HTML",
"bytes": "3039"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Makefile",
"bytes": "124"
},
{
"name": "Python",
"bytes": "3521"
}
],
"symlink_target": ""
} |
import pykube
import logging
from invoke import task
import requests
from prettytable import PrettyTable
# Path to the local kubeconfig used to authenticate against the cluster.
KUBE_CONFIG_PATH="~/.kube/config"
# Shared pykube HTTP client; every task in this module reuses it.
kapi = pykube.HTTPClient(pykube.KubeConfig.from_file(KUBE_CONFIG_PATH))
log = logging.getLogger(__name__)
# log.setLevel(logging.DEBUG)
log.setLevel(logging.INFO)
# Stream handler (stderr by default) so invoke runs show the health table.
console = logging.StreamHandler()
log.addHandler(console)
@task(optional=['version'])
def check(ctx, stack, env, active=True, use_ip=False, version=None):
    """Health-checks every service of the given stack & environment."""
    namespace = "%s-%s" % (stack, env)
    selector = {'active': 'true' if active else 'false'}
    matched = pykube.Service.objects(kapi).filter(
        namespace=namespace, selector=selector)
    report = PrettyTable(['URL', 'Status'])
    for service in matched:
        report.add_row(do_svc_health_check(service, use_ip))
    log.info(report)
def do_svc_health_check(service, use_ip):
    """Probe one service's /healthz endpoint.

    Returns ``(url, result)`` where result is the ``requests`` response on
    success or the string ``'ERROR'`` on any failure.
    NOTE(review): the raw Response object (not its status code) is put in
    the table — presumably intentional, confirm with the table output.
    """
    spec = service.obj['spec']
    port = spec.get('ports')[0]['port']
    if use_ip:
        host = spec.get('clusterIP')
    else:
        host = "%s.%s" % (service.name, service.namespace)
    url = "http://%s:%d/healthz" % (host, port)
    try:
        return url, requests.get(url)
    except Exception:
        return url, 'ERROR'
| {
"content_hash": "598b622f49e36e9c816f33f04183bc4f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 102,
"avg_line_length": 35.22222222222222,
"alnum_prop": 0.692429022082019,
"repo_name": "cackharot/kubo",
"id": "ade6069c4a37f5beb6227e0d7c6b8fca791fe63d",
"size": "1268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "health_actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15997"
},
{
"name": "Smarty",
"bytes": "1679"
}
],
"symlink_target": ""
} |
from docutils.statemachine import StringList
def get_rst_class_elements(
    environment, module_name, module_path_name, whitelist_names=None,
    undocumented_members=False, private_members=False,
    force_partial_import=False, skip_attribute_value=False, rst_elements=None
):
    """Return :term:`reStructuredText` for the classes in *environment*.

    *module_name* / *module_path_name* are the module and module-path
    aliases added to each directive.  *whitelist_names* restricts output to
    the listed class names.  *undocumented_members* / *private_members*
    toggle inclusion of undocumented and underscore-prefixed classes.
    *force_partial_import* and *skip_attribute_value* forward the matching
    directive options.  *rst_elements* is an optional mapping (line number
    -> list of directives) updated in place and returned.
    """
    if rst_elements is None:
        rst_elements = {}
    for class_env in environment["class"].values():
        class_name = class_env["name"]
        # Honour the visibility switches, then the whitelist.
        if class_env["description"] is None and not undocumented_members:
            continue
        if class_name.startswith("_") and not private_members:
            continue
        if whitelist_names is not None and class_name not in whitelist_names:
            continue
        options = []
        if force_partial_import:
            options.append(":force-partial-import:")
        if skip_attribute_value:
            options.append(":skip-attribute-value:")
        rst_elements[class_env["line_number"]] = [
            rst_generate(
                directive="autoclass",
                element_id=class_env["id"],
                alias=class_name,
                module_alias=module_name,
                module_path_alias=module_path_name,
                extra_options=options,
            )
        ]
    return rst_elements
def get_rst_attribute_elements(
    class_environment, whitelist_names=None, blacklist_ids=None,
    undocumented_members=False, private_members=False, skip_value=False,
    rst_elements=None,
):
    """Return :term:`reStructuredText` for the attributes of
    *class_environment*.

    *whitelist_names* restricts output to the listed names;
    *blacklist_ids* drops specific element identifiers (a method can also
    register as an attribute — the method directive wins by default).
    *undocumented_members* / *private_members* toggle inclusion of
    undocumented and underscore-prefixed attributes; *skip_value* hides
    the attribute value.  *rst_elements* is updated in place and returned.
    """
    if rst_elements is None:
        rst_elements = {}
    excluded = blacklist_ids if blacklist_ids is not None else []
    for attr_env in class_environment["attribute"].values():
        if attr_env["id"] in excluded:
            continue
        attr_name = attr_env["name"]
        if attr_env["description"] is None and not undocumented_members:
            continue
        if attr_name.startswith("_") and not private_members:
            continue
        if whitelist_names is not None and attr_name not in whitelist_names:
            continue
        options = [":skip-value:"] if skip_value else []
        rst_elements[attr_env["line_number"]] = [
            rst_generate(
                directive="autoattribute",
                element_id=attr_env["id"],
                extra_options=options,
            )
        ]
    return rst_elements
def get_rst_method_elements(
    class_environment, whitelist_names=None, skip_constructor=False,
    undocumented_members=False, private_members=False, rst_elements=None
):
    """Return :term:`reStructuredText` for the methods of
    *class_environment*.

    *whitelist_names* restricts output to the listed names;
    *skip_constructor* drops the ``constructor`` method.
    *undocumented_members* / *private_members* toggle inclusion of
    undocumented and underscore-prefixed methods.  *rst_elements* is
    updated in place and returned.
    """
    if rst_elements is None:
        rst_elements = {}
    for method_env in class_environment["method"].values():
        method_name = method_env["name"]
        if skip_constructor and method_name == "constructor":
            continue
        if method_env["description"] is None and not undocumented_members:
            continue
        if method_name.startswith("_") and not private_members:
            continue
        if whitelist_names is not None and method_name not in whitelist_names:
            continue
        rst_elements[method_env["line_number"]] = [
            rst_generate(
                directive="automethod",
                element_id=method_env["id"],
            )
        ]
    return rst_elements
def get_rst_function_elements(
    environment, module_name, module_path_name, whitelist_names=None,
    undocumented_members=False, private_members=False,
    force_partial_import=False, rst_elements=None
):
    """Return :term:`reStructuredText` for the functions in *environment*.

    *module_name* / *module_path_name* are the module and module-path
    aliases added to each directive.  *whitelist_names* restricts output
    to the listed names; *undocumented_members* / *private_members* toggle
    inclusion of undocumented and underscore-prefixed functions;
    *force_partial_import* forwards the matching directive option.
    *rst_elements* is updated in place and returned.
    """
    if rst_elements is None:
        rst_elements = {}
    for func_env in environment["function"].values():
        func_name = func_env["name"]
        if func_env["description"] is None and not undocumented_members:
            continue
        if func_name.startswith("_") and not private_members:
            continue
        if whitelist_names is not None and func_name not in whitelist_names:
            continue
        options = [":force-partial-import:"] if force_partial_import else []
        rst_elements[func_env["line_number"]] = [
            rst_generate(
                directive="autofunction",
                element_id=func_env["id"],
                alias=func_name,
                module_alias=module_name,
                module_path_alias=module_path_name,
                extra_options=options,
            )
        ]
    return rst_elements
def get_rst_data_elements(
    environment, module_name, module_path_name, whitelist_names=None,
    blacklist_ids=None, undocumented_members=False, private_members=False,
    force_partial_import=False, skip_value=False, rst_elements=None,
):
    """Return :term:`reStructuredText` for the data elements in
    *environment*.

    *module_name* / *module_path_name* are the module and module-path
    aliases added to each directive.  *whitelist_names* restricts output
    to the listed names; *blacklist_ids* drops specific identifiers (a
    function can also register as data — the function directive wins by
    default).  *undocumented_members* / *private_members* toggle inclusion
    of undocumented and underscore-prefixed data; *force_partial_import*
    and *skip_value* forward the matching directive options.
    *rst_elements* is updated in place and returned.
    """
    if rst_elements is None:
        rst_elements = {}
    excluded = blacklist_ids if blacklist_ids is not None else []
    for data_env in environment["data"].values():
        if data_env["id"] in excluded:
            continue
        data_name = data_env["name"]
        if data_env["description"] is None and not undocumented_members:
            continue
        if data_name.startswith("_") and not private_members:
            continue
        if whitelist_names is not None and data_name not in whitelist_names:
            continue
        options = []
        if force_partial_import:
            options.append(":force-partial-import:")
        if skip_value:
            options.append(":skip-value:")
        rst_elements[data_env["line_number"]] = [
            rst_generate(
                directive="autodata",
                element_id=data_env["id"],
                alias=data_name,
                module_alias=module_name,
                module_path_alias=module_path_name,
                extra_options=options,
            )
        ]
    return rst_elements
def get_rst_export_elements(
    file_environment, environment, module_name, module_path_name,
    skip_data_value=False, skip_attribute_value=False, rst_elements=None
):
    """Return :term:`reStructuredText` from exported elements within
    *file_environment*.

    *environment* is the full :term:`Javascript` environment processed
    in :mod:`~champollion.parser`.
    *module_name* is the module alias that should be added to each
    directive; *module_path_name* is the module path alias.
    *skip_data_value* / *skip_attribute_value* indicate whether data and
    attribute values should not be displayed.
    *rst_elements* can be an initial dictionary that will be updated and
    returned.
    """
    export_environment = file_environment["export"]
    import_environment = file_environment["import"]
    if rst_elements is None:
        rst_elements = {}
    for _exported_env_id, _exported_env in export_environment.items():
        from_module_id = _exported_env["module"]
        line_number = _exported_env["line_number"]
        # Several exports can share one source line; keep a list per line.
        if line_number not in rst_elements.keys():
            rst_elements[line_number] = []
        name = _exported_env["name"]
        alias = _exported_env["alias"]
        if alias is None:
            alias = name
        # Update module origin and name from import if necessary
        # (re-export of an imported symbol: `import {x} ...; export {x}`).
        if (from_module_id is None and
                _exported_env_id in import_environment.keys()):
            name = import_environment[_exported_env_id]["name"]
            from_module_id = import_environment[_exported_env_id]["module"]
        # Ignore element if the origin module can not be found
        if from_module_id not in environment["module"].keys():
            continue
        from_module_environment = environment["module"][from_module_id]
        from_file_id = from_module_environment["file_id"]
        from_file_env = environment["file"][from_file_id]
        if name == "default":
            # Re-exported default: resolve the default of the origin file.
            rst_element = get_rst_default_from_file_environment(
                from_file_env, alias, module_name, module_path_name,
                skip_data_value=skip_data_value,
                skip_attribute_value=skip_attribute_value,
            )
            if rst_element is None:
                continue
            rst_elements[line_number].append(rst_element)
        elif name == "*":
            # `export * from ...`: document the whole origin module.
            extra_options = [
                ":force-partial-import:",
                ":members:",
                ":skip-description:"
            ]
            if skip_data_value:
                extra_options.append(":skip-data-value:")
            if skip_attribute_value:
                extra_options.append(":skip-attribute-value:")
            rst_element = rst_generate(
                directive="automodule",
                element_id=from_module_id,
                module_alias=module_name,
                module_path_alias=module_path_name,
                extra_options=extra_options
            )
            rst_elements[line_number].append(rst_element)
        else:
            # Named re-export: resolve it inside the origin file.
            rst_element = get_rst_name_from_file_environment(
                name, from_file_env, alias, module_name, module_path_name,
                skip_data_value=skip_data_value,
                skip_attribute_value=skip_attribute_value,
            )
            if rst_element is None:
                continue
            rst_elements[line_number].append(rst_element)
    return rst_elements
def get_rst_default_from_file_environment(
    file_environment, alias, module_name, module_path_name,
    skip_data_value=False, skip_attribute_value=False
):
    """Return :term:`reStructuredText` for the *default* export of
    *file_environment*.

    *alias* replaces the element name; *module_name* / *module_path_name*
    replace its module name and path.  *skip_data_value* /
    *skip_attribute_value* hide data and attribute values.

    .. warning:: Returns None when the file has no default export.
    """
    # Search classes first, then functions, then data (same precedence as
    # the name-based lookup below in this module).
    for class_env in file_environment["class"].values():
        if not class_env["default"]:
            continue
        options = [":force-partial-import:"]
        if skip_data_value:
            options.append(":skip-data-value:")
        if skip_attribute_value:
            options.append(":skip-attribute-value:")
        return rst_generate(
            directive="autoclass", element_id=class_env["id"], alias=alias,
            module_alias=module_name, module_path_alias=module_path_name,
            extra_options=options,
        )
    for function_env in file_environment["function"].values():
        if function_env["default"]:
            return rst_generate(
                directive="autofunction", element_id=function_env["id"],
                alias=alias, module_alias=module_name,
                module_path_alias=module_path_name,
                extra_options=[":force-partial-import:"],
            )
    for data_env in file_environment["data"].values():
        if not data_env["default"]:
            continue
        options = [":force-partial-import:"]
        if skip_data_value:
            options.append(":skip-value:")
        return rst_generate(
            directive="autodata", element_id=data_env["id"], alias=alias,
            module_alias=module_name, module_path_alias=module_path_name,
            extra_options=options,
        )
def get_rst_name_from_file_environment(
    name, file_environment, alias, module_name, module_path_name,
    skip_data_value=False, skip_attribute_value=False
):
    """Return :term:`reStructuredText` for the exported element called
    *name* within *file_environment*.

    *alias* replaces the element name; *module_name* / *module_path_name*
    replace its module name and path.  *skip_data_value* /
    *skip_attribute_value* hide data and attribute values.

    .. warning:: Returns None when no exported element matches *name*.
    """
    # Same precedence as the default-export lookup: class, function, data.
    for class_env in file_environment["class"].values():
        if class_env["name"] != name or not class_env["exported"]:
            continue
        options = [":force-partial-import:"]
        if skip_data_value:
            options.append(":skip-data-value:")
        if skip_attribute_value:
            options.append(":skip-attribute-value:")
        return rst_generate(
            directive="autoclass", element_id=class_env["id"], alias=alias,
            module_alias=module_name, module_path_alias=module_path_name,
            extra_options=options,
        )
    for function_env in file_environment["function"].values():
        if function_env["name"] == name and function_env["exported"]:
            return rst_generate(
                directive="autofunction", element_id=function_env["id"],
                alias=alias, module_alias=module_name,
                module_path_alias=module_path_name,
                extra_options=[":force-partial-import:"],
            )
    for data_env in file_environment["data"].values():
        if data_env["name"] != name or not data_env["exported"]:
            continue
        options = [":force-partial-import:"]
        if skip_data_value:
            options.append(":skip-value:")
        return rst_generate(
            directive="autodata", element_id=data_env["id"], alias=alias,
            module_alias=module_name, module_path_alias=module_path_name,
            extra_options=options,
        )
def rst_generate(
    directive, element_id, alias=None, module_alias=None,
    module_path_alias=None, extra_options=None
):
    """Build a `StringList` for one ``js`` domain directive.

    *directive* is one of the directives this extension adds to the
    :term:`Javascript` domain; *element_id* is an element identifier from
    :mod:`~champollion.parser`.  *alias*, *module_alias* and
    *module_path_alias* override the element's displayed name, module name
    and module path; *extra_options* appends raw directive options.
    """
    # Assemble the directive line by line; the leading and two trailing
    # empty strings reproduce the blank lines around the directive.
    lines = ["", ".. js:{directive}:: {id}".format(
        directive=directive, id=element_id
    )]
    if alias is not None:
        lines.append("    :alias: {alias}".format(alias=alias))
    if module_alias is not None:
        lines.append("    :module-alias: {module}".format(module=module_alias))
    if module_path_alias is not None:
        lines.append(
            "    :module-path-alias: {module}".format(module=module_path_alias)
        )
    for option in (extra_options if extra_options is not None else []):
        lines.append("    {option}".format(option=option))
    lines.append("")
    lines.append("")
    return StringList(lines)
def rst_string(expression=""):
    """Wrap *expression* into a docutils `StringList`, one entry per line."""
    lines = expression.split("\n")
    return StringList(lines)
| {
"content_hash": "1c40cc312e5560fbe24e83f082dc9a2d",
"timestamp": "",
"source": "github",
"line_count": 618,
"max_line_length": 77,
"avg_line_length": 32.85113268608414,
"alnum_prop": 0.6214166092010639,
"repo_name": "buddly27/champollion",
"id": "bf521c5d474c8ed16a2729b9b2d8f2ee9d021be5",
"size": "20320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/champollion/directive/rst_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "332819"
}
],
"symlink_target": ""
} |
from google.cloud import iam_v2beta
async def sample_get_policy():
    """Fetch a single IAM policy by resource name and print it."""
    # Instantiate the asynchronous Policies client.
    client = iam_v2beta.PoliciesAsyncClient()
    # "name_value" is a placeholder for the policy's resource name.
    response = await client.get_policy(
        request=iam_v2beta.GetPolicyRequest(name="name_value")
    )
    print(response)
# [END iam_v2beta_generated_Policies_GetPolicy_async]
| {
"content_hash": "428248a12b470452bb05a06f3e7d384b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 55,
"avg_line_length": 22.789473684210527,
"alnum_prop": 0.6928406466512702,
"repo_name": "googleapis/python-iam",
"id": "08bf54bc7d0715bdc90f45790cab533e5654ea85",
"size": "1802",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/iam_v2beta_generated_policies_get_policy_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "778265"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
} |
"""
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
import random
from math import *
from streams.generators.tools.transition_functions import Transition
class MIXED:
    """Synthetic MIXED data stream with four abrupt concept drifts.

    Generates ``5 * concept_length`` labelled instances.  At every concept
    boundary the labelling function is inverted; each drift is smoothed
    over ``transition_length`` instances with a sigmoid mixture, and a
    ``noise_rate`` fraction of labels is flipped as class noise.
    NOTE: the exact sequence of ``random`` calls defines the stream, so
    the generation code must not be reordered.
    """
    def __init__(self, concept_length=20000, transition_length=50, noise_rate=0.1, random_seed=1):
        self.__INSTANCES_NUM = 5 * concept_length
        self.__CONCEPT_LENGTH = concept_length
        self.__NUM_DRIFTS = 4
        self.__W = transition_length
        self.__RECORDS = []
        self.__TRANSITIONS = []
        self.__RANDOM_SEED = random_seed
        random.seed(self.__RANDOM_SEED)
        # Pre-select the record indices whose class labels will be flipped.
        self.__NOISE_LOCATIONS = random.sample(range(0, self.__INSTANCES_NUM), int(self.__INSTANCES_NUM * noise_rate))
        print("You are going to generate a " + self.get_class_name() + " data stream containing " +
              str(self.__INSTANCES_NUM) + " instances, and " + str(self.__NUM_DRIFTS) + " concept drifts; " + "\n\r" +
              "where they appear at every " + str(self.__CONCEPT_LENGTH) + " instances.")
    @staticmethod
    def get_class_name():
        """Return this generator's class name ("MIXED")."""
        return MIXED.__name__
    def generate(self, output_path="MIXED"):
        """Generate the full stream and write it to ``<output_path>.arff``."""
        random.seed(self.__RANDOM_SEED)
        # [1] CREATING RECORDS
        for i in range(0, self.__INSTANCES_NUM):
            concept_sec = int(i / self.__CONCEPT_LENGTH)
            # Alternate between the base concept (0) and its inversion (1).
            dist_id = int(concept_sec % 2)
            record = self.create_record(dist_id)
            self.__RECORDS.append(list(record))
        # [2] TRANSITION: overwrite the first __W records of each new
        # concept with a sigmoid-weighted mix of old and new concepts.
        for i in range(1, self.__NUM_DRIFTS + 1):
            transition = []
            if (i % 2) == 1:
                for j in range(0, self.__W):
                    if random.random() < Transition.sigmoid(j, self.__W):
                        record = self.create_record(1)
                    else:
                        record = self.create_record(0)
                    transition.append(list(record))
            else:
                for j in range(0, self.__W):
                    if random.random() < Transition.sigmoid(j, self.__W):
                        record = self.create_record(0)
                    else:
                        record = self.create_record(1)
                    transition.append(list(record))
            starting_index = i * self.__CONCEPT_LENGTH
            ending_index = starting_index + self.__W
            self.__RECORDS[starting_index: ending_index] = transition
        # [3] ADDING NOISE
        if len(self.__NOISE_LOCATIONS) != 0:
            self.add_noise()
        self.write_to_arff(output_path + ".arff")
    def create_record(self, dist_func_id):
        """Draw one labelled record; ``dist_func_id == 1`` inverts the label.

        Rejection-samples attributes so that classes stay balanced (a fair
        coin decides whether to produce a positive or negative instance).
        """
        v, w, x, y, fx = self.create_attribute_values()
        res = self.get_mixed2_result(v, w, y, fx)
        if random.random() < 0.5:
            while res is False:
                v, w, x, y, fx = self.create_attribute_values()
                res = self.get_mixed2_result(v, w, y, fx)
            c = 'p'
        else:
            while res is True:
                v, w, x, y, fx = self.create_attribute_values()
                res = self.get_mixed2_result(v, w, y, fx)
            c = 'n'
        if dist_func_id == 1:
            c = 'n' if c == 'p' else 'p'
        return v, w, x, y, c
    @staticmethod
    def create_attribute_values():
        """Sample raw attributes: booleans v, w; uniforms x, y; and f(x)."""
        v = random.choice([False, True])
        w = random.choice([False, True])
        x = random.uniform(0, 1)
        y = random.uniform(0, 1)
        fx = 0.5 + 0.3 * sin(3 * pi * x)
        return v, w, x, y, fx
    @staticmethod
    def get_mixed2_result(v, w, y, fx):
        """Classification function of the MIXED concept."""
        if (fx > y and v is True) or (fx > y and w is True) or (v is True and w is True):
            return True
        else:
            return False
    def add_noise(self):
        """Flip the class label at every pre-selected noise location."""
        for i in range(0, len(self.__NOISE_LOCATIONS)):
            noise_spot = self.__NOISE_LOCATIONS[i]
            c = self.__RECORDS[noise_spot][4]
            if c == 'p':
                self.__RECORDS[noise_spot][4] = 'n'
            else:
                self.__RECORDS[noise_spot][4] = 'p'
    def write_to_arff(self, output_path):
        """Write the generated records to *output_path* in ARFF format."""
        arff_writer = open(output_path, "w")
        arff_writer.write("@relation MIXED" + "\n")
        arff_writer.write("@attribute v {False,True}" + "\n" +
                          "@attribute w {False,True}" + "\n" +
                          "@attribute x real" + "\n" +
                          "@attribute y real" + "\n" +
                          "@attribute class {p,n}" + "\n\n")
        arff_writer.write("@data" + "\n")
        for i in range(0, len(self.__RECORDS)):
            arff_writer.write(str(self.__RECORDS[i][0]) + "," + str(self.__RECORDS[i][1]) + "," +
                              str("%0.3f" % self.__RECORDS[i][2]) + "," + str("%0.3f" % self.__RECORDS[i][3]) + "," +
                              str(self.__RECORDS[i][4]) + "\n")
        arff_writer.close()
        print("You can find the generated files in " + output_path + "!")
| {
"content_hash": "f6c199f23598a01ed3c67b112fb9e716",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 118,
"avg_line_length": 38.282442748091604,
"alnum_prop": 0.5042871385842472,
"repo_name": "alipsgh/tornado",
"id": "2fa79e400e0b593944ba287610908f3bebfbe730",
"size": "5015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streams/generators/mixed_stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "224524"
}
],
"symlink_target": ""
} |
"""
docstring needed
:copyright: Copyright 2010-2017 by the NineML Python team, see AUTHORS.
:license: BSD-3, see LICENSE for details.
"""
from ...base import Parameter
from nineml.abstraction.expressions import Alias, Constant
from nineml.visitors import BaseVisitor
from nineml.units import Dimension, Unit
class TypesComponentValidator(BaseVisitor):
    """Visitor that asserts every visited element of a component class
    is an instance of the expected 9ML type."""

    def __init__(self, component_class, **kwargs):  # @UnusedVariable
        BaseVisitor.__init__(self)
        self.visit(component_class)

    def action_parameter(self, parameter, **kwargs):  # @UnusedVariable
        assert isinstance(parameter, Parameter), \
            "{} != {}".format(type(parameter), Parameter)

    def action_alias(self, alias, **kwargs):  # @UnusedVariable
        assert isinstance(alias, Alias)

    def action_constant(self, constant, **kwargs):  # @UnusedVariable
        # The original wrapped this assert in a try/except that only
        # re-raised immediately -- a no-op; the bare assert is equivalent.
        assert isinstance(constant, Constant)

    def action_dimension(self, dimension, **kwargs):  # @UnusedVariable
        assert isinstance(dimension, Dimension)

    def action_unit(self, unit, **kwargs):  # @UnusedVariable
        assert isinstance(unit, Unit)
| {
"content_hash": "b70625c21be920588630370484337296",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 71,
"avg_line_length": 31.783783783783782,
"alnum_prop": 0.6794217687074829,
"repo_name": "INCF/lib9ML",
"id": "130a78c99bc8c8ebaf43470aacd6d850e0d263f8",
"size": "1176",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nineml/abstraction/componentclass/visitors/validators/types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "770"
},
{
"name": "Python",
"bytes": "716702"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import base64
import struct
import zlib
from Crypto.Cipher import ARC4
from Crypto.Hash import MD5
class BaseStantinkoDecryptor(object):
    """Common state and RC4 helper shared by the Stantinko decryptors."""

    def __init__(self):
        self.time = None
        self.name = None
        self.payload = None
        self.errors = []

    def _decrypt(self, data, key):
        # The RC4 key is the hex digest of the MD5 of the raw key material.
        rc4_key = MD5.new(key).hexdigest()
        return ARC4.new(rc4_key).decrypt(data)
class PDSIResponseDecryptor(BaseStantinkoDecryptor):
    """Decrypts PDS "I" C&C responses shaped like
    base64("g=<time>&<len:u32le><crc32:u32le><name>***<rc4 blob>")."""

    def __init__(self):
        # Name the class explicitly: super(self.__class__, self) recurses
        # forever as soon as this class is subclassed.
        super(PDSIResponseDecryptor, self).__init__()

    def parse_response(self, response):
        """Decode/decrypt *response*; results land in self.name and
        self.payload, problems are appended to self.errors."""
        try:
            data = base64.b64decode(response)
        except (TypeError, ValueError) as e:
            # py2 b64decode raises TypeError; py3 raises binascii.Error,
            # a ValueError subclass.
            self.errors.append(e)
            return
        param1 = b"g="
        sep = b"&"
        marker = b"***"
        i1 = data.find(param1, 0)
        i2 = data.find(sep, i1)
        if i2 == -1:
            self.payload = ""
            return
        time = data[i1 + len(param1):i2]
        beg_data = i2 + len(sep)
        # 8-byte header after the separator: <length><crc32>, both u32le.
        checksum = struct.unpack("<I", data[beg_data + 4:beg_data + 8])[0]
        length = struct.unpack("<I", data[beg_data:beg_data + 4])[0]
        i4 = data.find(marker, beg_data)
        cipher = data[i4 + len(marker):]
        headers_len = 8
        self.name = data[beg_data + headers_len:i4]
        data = self._decrypt(cipher, time)
        if not (zlib.crc32(data) & 0xffffffff) == checksum:
            self.errors.append("Invalid checksum")
        if not len(data) == length:
            self.errors.append("Invalid length")
        try:
            self.payload = zlib.decompress(data)
        except zlib.error as e:
            self.errors.append(e)
class PDSClientDecryptor(BaseStantinkoDecryptor):
    """Decrypts PDS client->C&C requests shaped like
    "g=<time>&a=" + base64(rc4 blob)."""

    def __init__(self):
        # Explicit class name avoids the super(self.__class__, ...)
        # infinite-recursion trap under subclassing.
        super(PDSClientDecryptor, self).__init__()

    def parse_response(self, response):
        """Decrypt the "a=" blob keyed by the "g=" timestamp; the result
        goes to self.payload, problems to self.errors."""
        param1 = b"g="
        sep = b"&"
        param2 = b"a="
        i1 = response.find(param1)
        i2 = response.find(sep, i1 + len(param1))
        if i1 == -1:
            self.errors.append("Invalid format: Missing param {}".format(param1))
            return
        time = response[i1 + len(param1):i2]
        i3 = response.find(param2, i2 + len(sep))
        if i3 == -1:
            self.errors.append("Invalid format: Missing param {}".format(param2))
            return
        try:
            data = base64.b64decode(response[i3 + len(param2):])
        except (TypeError, ValueError) as e:
            # py2 raises TypeError; py3 raises binascii.Error (ValueError).
            self.errors.append(e)
            return
        self.payload = self._decrypt(data, time)
class PDSResponseDecryptor(BaseStantinkoDecryptor):
    """Decrypts PDS C&C responses: base64 data holding "g=<time>&" then a
    "p=" or "a=" section with an 8-byte header, a name, "***" and the
    RC4-encrypted zlib payload."""

    def __init__(self):
        # Explicit class name avoids the super(self.__class__, ...)
        # infinite-recursion trap under subclassing.
        super(PDSResponseDecryptor, self).__init__()
        self.param = None  # which section was found: "p" or "a"

    def parse_response(self, response):
        """Decode/decrypt *response*; fills self.param, self.name and
        self.payload, appending any problems to self.errors."""
        try:
            data = base64.b64decode(response)
        except (TypeError, ValueError) as e:
            # py2 raises TypeError; py3 raises binascii.Error (ValueError).
            self.errors.append(e)
            return
        param1 = b"g="
        sep = b"&"
        param2 = b"p="
        param3 = b"a="
        marker = b"***"
        i1 = data.find(param1, 0)
        i2 = data.find(sep, i1)
        if i2 == -1:
            self.payload = ""
            return
        time = data[i1 + len(param1):i2]
        # Look for "p=" first, then fall back to "a=".
        i3 = data.find(param2, i2 + len(sep), data.find(marker))
        self.param = "p"
        if i3 == -1:
            i3 = data.find(param3, i2 + len(sep), data.find(marker))
            self.param = "a"
        if i3 == -1:
            self.payload = ""
            return
        # 8-byte header after "<param>=": <length:u32le><crc32:u32le>.
        checksum = struct.unpack("<I", data[i3 + 6:i3 + 10])[0]
        length = struct.unpack("<I", data[i3 + 2:i3 + 6])[0]
        i4 = data.find(marker, i3 + len(self.param) + 1)
        cipher = data[i4 + len(marker):]
        headers_len = 8
        self.name = data[i3 + len(self.param) + 1 + headers_len:i4]
        data = self._decrypt(cipher, time)
        if not (zlib.crc32(data) & 0xffffffff) == checksum:
            self.errors.append("Invalid checksum")
        if not len(data) == length:
            self.errors.append("Invalid length")
        try:
            self.payload = zlib.decompress(data)
        except zlib.error as e:
            self.errors.append(e)
class BEDSClientDecryptor(BaseStantinkoDecryptor):
    """Decrypts BEDS client->C&C requests shaped like
    "date=<time>&data=" + base64(rc4 blob)."""

    def __init__(self):
        # Explicit class name avoids the super(self.__class__, ...)
        # infinite-recursion trap under subclassing.
        super(BEDSClientDecryptor, self).__init__()

    def parse_response(self, response):
        """Decrypt the "data=" blob keyed by the "date=" timestamp; the
        result goes to self.payload, problems to self.errors."""
        param1 = b"date="
        sep = b"&"
        param2 = b"data="
        i1 = response.find(param1)
        i2 = response.find(sep, i1 + len(param1))
        if i1 == -1:
            self.errors.append("Invalid format : Missing param {}".format(param1))
            return
        time = response[i1 + len(param1):i2]
        i3 = response.find(param2, i2 + len(sep))
        if i3 == -1:
            self.errors.append("Invalid format: Missing param {}".format(param2))
            return
        try:
            data = base64.b64decode(response[i3 + len(param2):])
        except (TypeError, ValueError) as e:
            # py2 raises TypeError; py3 raises binascii.Error (ValueError).
            self.errors.append(e)
            return
        self.payload = self._decrypt(data, time)
class BEDSResponseDecryptor(BaseStantinkoDecryptor):
    """Decrypts BEDS C&C responses: base64("date=<time>&data=<rc4 blob>"),
    where large payloads may additionally contain a zlib-compressed PE."""

    def __init__(self):
        # Explicit class name avoids the super(self.__class__, ...)
        # infinite-recursion trap under subclassing.
        super(BEDSResponseDecryptor, self).__init__()

    def parse_response(self, response):
        """Decode/decrypt *response* into self.payload (and self.name for
        embedded binaries); problems are appended to self.errors."""
        try:
            data = base64.b64decode(response)
        except (TypeError, ValueError) as e:
            # py2 raises TypeError; py3 raises binascii.Error (ValueError).
            self.errors.append(e)
            return
        param1 = b"date="
        param2 = b"data="
        sep = b"&"
        i1 = data.find(param1, 0)
        if i1 == -1:
            self.errors.append("Response doesn't contain '{}' parameter".format(param1))
            # The original fell through here and sliced with i1 == -1,
            # decrypting garbage; bail out like the sibling decryptors do.
            return
        i2 = data.find(sep, i1)
        t = data[i1 + len(param1):i2]
        i3 = data.find(param2, i2 + len(sep))
        if i2 == -1 or i3 == -1:
            self.errors.append("Response doesn't contain '{}' parameter".format(param2))
            return
        cipher = data[i3 + len(param2):]
        self.payload = self._decrypt(cipher, t)
        # Heuristic: If the payload is more than 4KB, it might be a PE
        if len(self.payload) > 4096:
            marker1 = b"\xE5\x31\x99\xD0\xA7\x7B\xA6\x23"
            i4 = self.payload.find(marker1)
            i5 = self.payload.find(b"\x7B", i4 + len(marker1))
            if i4 == -1 or i5 == -1:
                self.errors.append("Response has an invalid format : marker(s) missing")
                return
            try:
                self.payload = zlib.decompress(self.payload[i5 + 1:])
                self.name = "binary"
            except zlib.error as e:
                self.errors.append(e)
| {
"content_hash": "0fb49918528009001b6601514668e7d5",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 88,
"avg_line_length": 27.747863247863247,
"alnum_prop": 0.5338056368396735,
"repo_name": "eset/malware-research",
"id": "1ed84b950245553723ebbad47e61d4641a12eb3b",
"size": "8188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stantinko/stantinko_crypto.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2711"
},
{
"name": "Python",
"bytes": "184263"
},
{
"name": "Ruby",
"bytes": "7610"
},
{
"name": "Shell",
"bytes": "17711"
}
],
"symlink_target": ""
} |
"""
ggame package defines names that may be imported directly from ggame (legacy)
"""
from .__version__ import VERSION
from .asset import ImageAsset, TextAsset, CircleAsset, RectangleAsset
from .asset import PolygonAsset, LineAsset, EllipseAsset
from .asset import Frame, Color, LineStyle, BLACK, WHITE, BLACKLINE, WHITELINE
from .sound import SoundAsset, Sound
from .sprite import Sprite
from .app import App
from .event import KeyEvent, MouseEvent
| {
"content_hash": "bddcb9ba0ae6c604741a913ceb5adad4",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 78,
"avg_line_length": 40.90909090909091,
"alnum_prop": 0.7955555555555556,
"repo_name": "BrythonServer/ggame",
"id": "b470076c7ebfd2caf6e11ed6f1009d5608a6244b",
"size": "450",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ggame/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "203298"
},
{
"name": "Shell",
"bytes": "1010"
}
],
"symlink_target": ""
} |
import datetime
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from intranet3.utils.views import BaseView, MonthMixin
from intranet3.utils import excuses
from intranet3.helpers import next_month, previous_month
from intranet3.models import ApplicationConfig, User, Holiday
from intranet3.log import INFO_LOG, DEBUG_LOG, EXCEPTION_LOG
LOG = INFO_LOG(__name__)
DEBUG = DEBUG_LOG(__name__)
EXCEPTION = EXCEPTION_LOG(__name__)
@view_config(route_name='report_wrongtime_current', permission='admin')
class Current(BaseView):
    """Redirects to the annual wrong-time report for the current year."""

    def get(self):
        current_year = datetime.date.today().year
        annual_url = self.request.url_for('/report/wrongtime/annually', year=current_year)
        return HTTPFound(location=annual_url)
class AnnuallyReportMixin(object):
    """Builds the template context for the yearly "wrong time" report."""

    def _annually_report(self, year):
        """Return report data for *year*.

        Counts, per user and per month, the days that have at least one
        time entry modified *after* the day it is booked on (i.e. filled
        in late), skipping days for which the user registered an excuse.
        """
        year_start = datetime.date(year, 1, 1)
        year_end = datetime.date(year, 12, 31)
        excuses_error = None
        config_obj = ApplicationConfig.get_current_config()
        # One row per (user, day) that has at least one late entry.
        entries = self.session.query('user_id', 'date').from_statement("""
            SELECT date_trunc('day', s.date) as date, s.user_id as user_id
            FROM time_entry s
            WHERE DATE(s.modified_ts) > s.date AND
                  s.date >= :year_start AND
                  s.date <= :year_end
            GROUP BY date_trunc('day', s.date), s.user_id
        """).params(year_start=year_start, year_end=year_end)
        users = User.query.filter(User.is_active==True)\
            .filter(User.is_not_client())\
            .order_by(User.freelancer, User.name)
        # entries_grouped[user_id] is a 12-slot list, one counter per month.
        entries_grouped = {}
        _excuses = excuses.wrongtime()
        for user_id, date in entries:
            month = date.month - 1  # 0-based month index
            entry = entries_grouped.setdefault(user_id, [0]*12)
            # Only count the day if the user has no excuse for that date.
            if date.strftime('%Y-%m-%d') not in _excuses.get(user_id, {}).keys():
                entry[month] += 1
        # stats[user_id] == [total late days in the year]
        stats = {}
        for user_id, entry in entries_grouped.iteritems():
            stats_entry = stats.setdefault(user_id, [0])
            stats_entry[0] = sum(entry)
        return dict(
            entries=entries_grouped,
            stats=stats,
            users=users,
            year_start=year_start,
            limit=config_obj.monthly_incorrect_time_record_limit,
            excuses_error=excuses_error,
        )
@view_config(route_name='report_wrongtime_annually', permission='admin')
class Annually(AnnuallyReportMixin, BaseView):
    """Yearly wrong-time report; the year is taken from the query string."""

    def get(self):
        year = int(self.request.GET.get('year'))
        return self._annually_report(year)
@view_config(route_name='report_wrongtime_monthly', permission='admin')
class Monthly(MonthMixin, BaseView):
    """Per-user monthly breakdown of late (wrongly-booked) time entries."""

    def _group_by_user_monthly(self, data, user_id):
        """Map day-of-month -> [late-entry count, excuse text].

        *data* is a sequence of (date, incorrect_count) rows; the excuse
        column defaults to '-' unless the user registered one for the day.
        """
        result = {}
        _excuses = excuses.wrongtime()
        for date, incorrect_count in data:
            day_data = result.setdefault(date.day, [0, ''])
            excuse = '-'
            if date.strftime('%Y-%m-%d') in _excuses.get(user_id, {}).keys():
                excuse = _excuses[user_id][date.strftime('%Y-%m-%d')]
            day_data[0] = incorrect_count
            day_data[1] = excuse
        return result

    def get(self):
        """Render the monthly report for the user in the query string."""
        user_id = self.request.GET.get('user_id')
        month_start, month_end = self._get_month()
        user = User.query.filter(User.id==user_id).one()
        # Per-day count of entries modified after the day they book.
        query = self.session.query('date', 'incorrect_count').from_statement("""
            SELECT date, COUNT(date) as incorrect_count
            FROM time_entry s
            WHERE DATE(s.modified_ts) > s.date AND
                  s.user_id = :user_id AND
                  s.date >= :month_start AND
                  s.date <= :month_end
            GROUP BY date
        """).params(month_start=month_start, month_end=month_end, user_id=user.id)
        data = query.all()
        data = self._group_by_user_monthly(data, user.id)
        holidays = Holiday.all()
        return dict(
            data=data,
            user=user,
            is_holiday=lambda date: Holiday.is_holiday(date, holidays=holidays),
            month_start=month_start, month_end=month_end,
            next_month=next_month(month_start),
            prev_month=previous_month(month_start),
            datetime=datetime,
        )
| {
"content_hash": "67fafb01fab6ca8a377ec8273c05784d",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 102,
"avg_line_length": 34.82113821138211,
"alnum_prop": 0.5937427037123512,
"repo_name": "pytlakp/intranetref",
"id": "434bc4114d9ca8da916f257b47dc72fc596ec0c6",
"size": "4307",
"binary": false,
"copies": "1",
"ref": "refs/heads/pytlakp",
"path": "src/intranet3/views/report/wrongtime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "40508"
},
{
"name": "Python",
"bytes": "458587"
}
],
"symlink_target": ""
} |
"""
基于python 自身 Queue
"""
from scrapy.downloadermiddlewares.retry import RetryMiddleware
import logging
from scrapy.exceptions import NotConfigured
from scrapy.utils.response import response_status_message
import base64
# Queue lives in module "Queue" on py2 and "queue" on py3.
try:
    from Queue import Queue, Empty, Full
except ImportError:
    # py3
    from queue import Queue, Empty, Full
import random

logger = logging.getLogger(__name__)

# Shared proxy pool; the spider that harvests proxies is not written yet.
queue = Queue(maxsize=10)
class Proxy(object):
    """A single proxy endpoint plus its retry bookkeeping."""

    def __init__(self, ip, port, user_pass=None, proxy_retry_times=0):
        self._ip = ip
        self._port = port
        self._proxy_retry_times = proxy_retry_times  # times this proxy was retried
        self._user_pass = user_pass  # "user:pass" credentials, or None

    @property
    def get_ip(self):
        return self._ip

    @property
    def get_port(self):
        return self._port

    @property
    def get_proxy_retry_times(self):
        return self._proxy_retry_times

    @property
    def join_ip_port(self):
        return '%s:%s' % (self._ip, self._port)

    def proxy_format(self):
        """Return the URL form expected by request.meta['proxy']."""
        return 'http://' + self.join_ip_port

    def update_proxy_retry_times(self):
        self._proxy_retry_times += 1

    def proxy_authorization(self):
        """Build and return the Proxy-Authorization header value.

        Fixes vs. the original:
        * the value is now returned (the original returned None, so the
          middleware always set the header to None);
        * base64.b64encode replaces encodestring, which was removed in
          Python 3.9 and appended a spurious trailing newline.
        """
        if not self._user_pass:
            self.encoded_user_pass = 'Basic '
        else:
            creds = self._user_pass
            if isinstance(creds, str):
                creds = creds.encode('utf-8')
            self.encoded_user_pass = 'Basic ' + base64.b64encode(creds).decode('ascii')
        return self.encoded_user_pass
class SetProxyMiddleware(object):
    """Downloader middleware that assigns each request a proxy from the
    shared queue, falling back to the STANDBY_PROXY setting."""

    def __init__(self, settings):
        self.queue_block = settings.getbool('QUEUE_BLOCK', default=False)
        self.queue_timeout = settings.getint('QUEUE_TIMEOUT', default=0)
        # Fallback proxies used whenever the queue is empty, formatted as
        # [(ip, port, user_pass), ...] with user_pass possibly falsy/None.
        self.standby_proxy_list = settings.getlist('STANDBY_PROXY')
        # Use the module logger consistently (the original logged through
        # the root logger even though `logger` was defined for this module).
        if not self.standby_proxy_list:
            # Without standbys there is no proxy source once the queue drains.
            logger.warning("STANDBY_PROXY is empty , This is bad")
        if self.queue_block and self.queue_timeout > 0.5:
            # A long blocking wait on the queue hurts crawl concurrency.
            logger.warning("QUEUE_TIMEOUT is too large , This is bad")

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)

    def process_request(self, request, spider):
        """Attach a proxy to *request*, preferring the shared queue."""
        try:
            proxy = queue.get(block=self.queue_block, timeout=self.queue_timeout)
            self._set_proxy(request, proxy)
        except Empty:
            logger.warning("Proxy Queue is empty")
            try:
                ip, port, user_pass = random.choice(self.standby_proxy_list)
                user_pass = user_pass or None
                self._set_proxy(request, Proxy(ip, port, user_pass))
            except ValueError:
                logger.warning("STANDBY_PROXY formal error . example : [(ip,port,user_pass), (ip,port,user_pass)]")
            except Exception as e:
                logger.error("Unknown Error : %s" % e)

    def _set_proxy(self, request, proxy):
        """Write the proxy URL, instance and auth header onto *request*."""
        request.meta['proxy'] = proxy.proxy_format()
        # Keep the instance so the retry middleware can requeue it later.
        request.meta['proxy_instantiation'] = proxy
        request.headers['Proxy-Authorization'] = proxy.proxy_authorization()
        logger.debug("User Proxy : %s" % proxy)
class ProxyQueueRetryMiddleware(RetryMiddleware):
    """RetryMiddleware variant that recycles working proxies back into the
    shared queue and retires proxies that failed too often."""

    def __init__(self, settings):
        if not settings.getbool('RETRY_ENABLED'):
            raise NotConfigured
        self.max_retry_times = settings.getint('RETRY_TIMES')
        self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))
        self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')
        self.max_proxy_retry_times = settings.getint('PROXY_RETRY_TIMES')  # per-proxy retry budget
        super(ProxyQueueRetryMiddleware, self).__init__(settings)

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)

    def process_response(self, request, response, spider):
        if request.meta.get('dont_retry', False):
            # The proxy worked (or retrying is disabled) -- recycle it.
            self._pick_up_proxy(request)
            return response
        if response.status in self.retry_http_codes:
            self._proxy_retry(request)
            reason = response_status_message(response.status)
            return self._retry(request, reason, spider) or response
        return response

    def process_exception(self, request, exception, spider):
        if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \
                and not request.meta.get('dont_retry', False):
            return self._retry(request, exception, spider)

    def _retry(self, request, reason, spider):
        """Schedule a retry copy of *request* until RETRY_TIMES is hit."""
        retries = request.meta.get('retry_times', 0) + 1
        if retries <= self.max_retry_times:
            logger.debug("Retrying %(request)s (failed %(retries)d times): %(reason)s",
                         {'request': request, 'retries': retries, 'reason': reason},
                         extra={'spider': spider})
            retryreq = request.copy()
            retryreq.meta['retry_times'] = retries
            retryreq.dont_filter = True
            retryreq.priority = request.priority + self.priority_adjust
            return retryreq
        else:
            logger.debug("Gave up retrying %(request)s (failed %(retries)d times): %(reason)s",
                         {'request': request, 'retries': retries, 'reason': reason},
                         extra={'spider': spider})

    def _pick_up_proxy(self, request):
        """Return the request's proxy to the shared queue (best effort)."""
        try:
            queue.put(request.meta['proxy_instantiation'], block=False)
            logger.debug("pick up proxy %s to queue" % request.meta['proxy'])
        except Full:
            logger.warning('proxy queue is full')
        except KeyError:
            logger.warning("didn't use to proxy : %s" % request.url)
        except Exception as e:
            logger.error("unknown error : %s" % e)

    def _proxy_retry(self, request):
        """Requeue the request's proxy unless it exceeded its budget."""
        try:
            proxy = request.meta['proxy_instantiation']
            proxy.update_proxy_retry_times()
            # get_proxy_retry_times is a *property*; the original called it
            # with parentheses, raising "TypeError: 'int' object is not
            # callable" on every retry.
            if proxy.get_proxy_retry_times <= self.max_proxy_retry_times:
                logger.debug("retrying proxy : %s , number : %s" % (proxy.proxy_format(), proxy.get_proxy_retry_times))
                self._pick_up_proxy(request)
            else:
                logger.debug("gave up retrying proxy : %s" % proxy.proxy_format())
        except KeyError:
            logger.warning("didn't use to proxy : %s" % request.url)
        except Exception as e:
            logger.error("unknown error : %s" % e)
| {
"content_hash": "129b4931cd091954b217eb1491cda138",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 118,
"avg_line_length": 38.08,
"alnum_prop": 0.6143457382953181,
"repo_name": "1398491991/scrapyProxyQueue",
"id": "55f67c1e097fa019148f86ab1c4f9d942488f992",
"size": "6924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ProxyQueueMiddleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11329"
}
],
"symlink_target": ""
} |
"""Support for Tellstick sensors."""
from collections import namedtuple
import logging
from tellcore import telldus
import tellcore.constants as tellcore_constants
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
CONF_ID,
CONF_NAME,
CONF_PROTOCOL,
PERCENTAGE,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# (name, unit) pair describing one tellcore sensor datatype.
DatatypeDescription = namedtuple("DatatypeDescription", ["name", "unit"])

# Configuration keys.
CONF_DATATYPE_MASK = "datatype_mask"
CONF_ONLY_NAMED = "only_named"
CONF_TEMPERATURE_SCALE = "temperature_scale"
CONF_MODEL = "model"

# 127 == bits 0-6 set, enabling every datatype listed in setup_platform.
DEFAULT_DATATYPE_MASK = 127
DEFAULT_TEMPERATURE_SCALE = TEMP_CELSIUS
# Platform configuration: temperature unit, a bitmask selecting which
# tellcore datatypes to expose, and an optional whitelist of named sensors.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(
            CONF_TEMPERATURE_SCALE, default=DEFAULT_TEMPERATURE_SCALE
        ): cv.string,
        vol.Optional(
            CONF_DATATYPE_MASK, default=DEFAULT_DATATYPE_MASK
        ): cv.positive_int,
        # Each entry needs id + name; protocol/model optionally disambiguate
        # sensors that share a numeric id.
        vol.Optional(CONF_ONLY_NAMED, default=[]): vol.All(
            cv.ensure_list,
            [
                vol.Schema(
                    {
                        vol.Required(CONF_ID): cv.positive_int,
                        vol.Required(CONF_NAME): cv.string,
                        vol.Optional(CONF_PROTOCOL): cv.string,
                        vol.Optional(CONF_MODEL): cv.string,
                    }
                )
            ],
        ),
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Tellstick sensors."""
    # Display metadata (name + unit) per tellcore datatype constant.
    sensor_value_descriptions = {
        tellcore_constants.TELLSTICK_TEMPERATURE: DatatypeDescription(
            "temperature", config.get(CONF_TEMPERATURE_SCALE)
        ),
        tellcore_constants.TELLSTICK_HUMIDITY: DatatypeDescription(
            "humidity", PERCENTAGE
        ),
        tellcore_constants.TELLSTICK_RAINRATE: DatatypeDescription("rain rate", ""),
        tellcore_constants.TELLSTICK_RAINTOTAL: DatatypeDescription("rain total", ""),
        tellcore_constants.TELLSTICK_WINDDIRECTION: DatatypeDescription(
            "wind direction", ""
        ),
        tellcore_constants.TELLSTICK_WINDAVERAGE: DatatypeDescription(
            "wind average", ""
        ),
        tellcore_constants.TELLSTICK_WINDGUST: DatatypeDescription("wind gust", ""),
    }
    try:
        tellcore_lib = telldus.TelldusCore()
    except OSError:
        _LOGGER.exception("Could not initialize Tellstick")
        return
    sensors = []
    datatype_mask = config.get(CONF_DATATYPE_MASK)
    if config[CONF_ONLY_NAMED]:
        # Build a lookup of display names keyed by the most specific
        # identifier configured: "<proto><model><id>", "<proto><id>",
        # or the bare numeric id.
        named_sensors = {}
        for named_sensor in config[CONF_ONLY_NAMED]:
            name = named_sensor[CONF_NAME]
            proto = named_sensor.get(CONF_PROTOCOL)
            model = named_sensor.get(CONF_MODEL)
            id_ = named_sensor[CONF_ID]
            if proto is not None:
                if model is not None:
                    named_sensors[f"{proto}{model}{id_}"] = name
                else:
                    named_sensors[f"{proto}{id_}"] = name
            else:
                named_sensors[id_] = name
    for tellcore_sensor in tellcore_lib.sensors():
        if not config[CONF_ONLY_NAMED]:
            sensor_name = str(tellcore_sensor.id)
        else:
            # Match bare id first, then proto+id, then proto+model+id;
            # sensors without a configured name are skipped entirely.
            proto_id = f"{tellcore_sensor.protocol}{tellcore_sensor.id}"
            proto_model_id = "{}{}{}".format(
                tellcore_sensor.protocol, tellcore_sensor.model, tellcore_sensor.id
            )
            if tellcore_sensor.id in named_sensors:
                sensor_name = named_sensors[tellcore_sensor.id]
            elif proto_id in named_sensors:
                sensor_name = named_sensors[proto_id]
            elif proto_model_id in named_sensors:
                sensor_name = named_sensors[proto_model_id]
            else:
                continue
        # One entity per datatype the sensor reports, filtered by the mask.
        for datatype in sensor_value_descriptions:
            if datatype & datatype_mask and tellcore_sensor.has_value(datatype):
                sensor_info = sensor_value_descriptions[datatype]
                sensors.append(
                    TellstickSensor(sensor_name, tellcore_sensor, datatype, sensor_info)
                )
    add_entities(sensors)
class TellstickSensor(SensorEntity):
    """One value (temperature, humidity, ...) of a Tellstick sensor."""

    def __init__(self, name, tellcore_sensor, datatype, sensor_info):
        """Store the tellcore handle and build the display name/unit."""
        self._tellcore_sensor = tellcore_sensor
        self._datatype = datatype
        self._value = None
        self._unit_of_measurement = sensor_info.unit or None
        self._name = f"{name} {sensor_info.name}"

    @property
    def name(self):
        """Display name: "<device name> <datatype name>"."""
        return self._name

    @property
    def state(self):
        """Most recently read value, or None before the first update."""
        return self._value

    @property
    def unit_of_measurement(self):
        """Unit for this datatype, if any."""
        return self._unit_of_measurement

    def update(self):
        """Poll the tellcore sensor for a fresh value."""
        self._value = self._tellcore_sensor.value(self._datatype).value
| {
"content_hash": "43848a00a0297ca9eda2115ded4a80e3",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 88,
"avg_line_length": 33.27388535031847,
"alnum_prop": 0.5997320061255743,
"repo_name": "kennedyshead/home-assistant",
"id": "f58c5916bfb1cd2707d4ce6a510e3f4e96159fc6",
"size": "5224",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tellstick/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
"""Support for HomeMatic covers."""
import logging
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
CoverEntity,
)
from .const import ATTR_DISCOVER_DEVICES
from .entity import HMDevice
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the platform."""
    if discovery_info is None:
        return
    covers = [HMCover(conf) for conf in discovery_info[ATTR_DISCOVER_DEVICES]]
    add_entities(covers, True)
class HMCover(HMDevice, CoverEntity):
    """Representation a HomeMatic Cover."""

    @property
    def current_cover_position(self):
        """Return current position of cover.

        None is unknown, 0 is closed, 100 is fully open.
        """
        return int(self._hm_get_state() * 100)

    def set_cover_position(self, **kwargs):
        """Move the cover to a specific position."""
        if ATTR_POSITION in kwargs:
            position = float(kwargs[ATTR_POSITION])
            position = min(100, max(0, position))  # clamp to 0..100
            level = position / 100.0
            self._hmdevice.set_level(level, self._channel)

    @property
    def is_closed(self):
        """Return if the cover is closed."""
        if self.current_cover_position is not None:
            return self.current_cover_position == 0
        return None

    def open_cover(self, **kwargs):
        """Open the cover."""
        self._hmdevice.move_up(self._channel)

    def close_cover(self, **kwargs):
        """Close the cover."""
        self._hmdevice.move_down(self._channel)

    def stop_cover(self, **kwargs):
        """Stop the device if in motion."""
        self._hmdevice.stop(self._channel)

    def _init_data_struct(self):
        """Generate a data dictionary (self._data) from metadata."""
        self._state = "LEVEL"
        self._data.update({self._state: None})
        # Covers with slats additionally expose LEVEL_2 (tilt).
        if "LEVEL_2" in self._hmdevice.WRITENODE:
            self._data.update({"LEVEL_2": None})

    @property
    def current_cover_tilt_position(self):
        """Return current position of cover tilt.

        None is unknown, 0 is closed, 100 is fully open.
        """
        if "LEVEL_2" not in self._data:
            return None
        level_2 = self._data.get("LEVEL_2")
        # _init_data_struct seeds LEVEL_2 with None until the first device
        # update arrives; report "unknown" instead of crashing on None * 100.
        if level_2 is None:
            return None
        return int(level_2 * 100)

    def set_cover_tilt_position(self, **kwargs):
        """Move the cover tilt to a specific position."""
        if "LEVEL_2" in self._data and ATTR_TILT_POSITION in kwargs:
            position = float(kwargs[ATTR_TILT_POSITION])
            position = min(100, max(0, position))  # clamp to 0..100
            level = position / 100.0
            self._hmdevice.set_cover_tilt_position(level, self._channel)

    def open_cover_tilt(self, **kwargs):
        """Open the cover tilt."""
        if "LEVEL_2" in self._data:
            self._hmdevice.open_slats()

    def close_cover_tilt(self, **kwargs):
        """Close the cover tilt."""
        if "LEVEL_2" in self._data:
            self._hmdevice.close_slats()

    def stop_cover_tilt(self, **kwargs):
        """Stop cover tilt."""
        if "LEVEL_2" in self._data:
            self.stop_cover(**kwargs)
| {
"content_hash": "b20813f15bdfc12dfeecc045b4d2ac67",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 72,
"avg_line_length": 30.130841121495326,
"alnum_prop": 0.5949131513647643,
"repo_name": "robbiet480/home-assistant",
"id": "a520c08e4789fa2f3dee6b59e16c37fc68fb2461",
"size": "3224",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homematic/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18837456"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
'''
test_dns_sprockets_impl - Tests DNSSprocketsImpl implementation class.
.. Copyright (c) 2015 Neustar, Inc. All rights reserved.
.. See COPYRIGHT.txt for full notice. See LICENSE.txt for terms and conditions.
'''
# pylint: skip-file
import dns_sprockets_lib.utils as utils
import dns_sprockets_lib.loaders as loaders
import dns_sprockets_lib.validators as validators
from dns_sprockets_lib.dns_sprockets_impl import DNSSprocketsImpl
def test_SprocketImpl():
    """End-to-end run of DNSSprocketsImpl with the file loader.

    Includes soa_origin and soa_unique, excludes soa_unique again, and
    expects exactly one executed test with zero errors.
    """

    class TestArgs(object):
        zone = '001.cst.net'
        loader = 'file'
        source = 'dns_sprockets_lib/tests/data/001.cst.net.'
        include_tests = ['soa_origin', 'soa_unique']
        exclude_tests = ['soa_unique']
        force_dnssec_type = 'detect'
        errors_only = False
        defines = ['file_allow_include=0']
        file_allow_include = '0'

    loader_names = utils.public_modules_in_package(loaders, ['tests'])
    validator_names = utils.public_modules_in_package(validators, ['tests'])
    impl = DNSSprocketsImpl(loader_names, validator_names, TestArgs())
    (_, test_cnt, err_cnt) = impl.run()
    assert test_cnt == 1
    assert err_cnt == 0
# end of file
| {
"content_hash": "38171c07311bdafd55a3c89038c516cd",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 30.94871794871795,
"alnum_prop": 0.676056338028169,
"repo_name": "ultradns/dns_sprockets",
"id": "bcfc041160de5ccc65be3c77b91564b0cf0abe6e",
"size": "1207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dns_sprockets_lib/tests/test_dns_sprockets_impl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "2920"
},
{
"name": "Python",
"bytes": "127740"
}
],
"symlink_target": ""
} |
from oslo_utils.fixture import uuidsentinel
from nova.compute import power_state
from nova.compute import vm_states
from nova import objects
# Shared fixtures for the PowerVM virt-driver unit tests.

# Minimal m1.small-style flavor.
TEST_FLAVOR = objects.flavor.Flavor(
    memory_mb=2048,
    swap=0,
    vcpu_weight=None,
    root_gb=10, id=2,
    name=u'm1.small',
    ephemeral_gb=0,
    rxtx_factor=1.0,
    flavorid=uuidsentinel.flav_id,
    vcpus=1)

# A stopped/shut-down instance using TEST_FLAVOR.
TEST_INSTANCE = objects.Instance(
    id=1,
    uuid=uuidsentinel.inst_id,
    display_name='Fake Instance',
    root_gb=10,
    ephemeral_gb=0,
    instance_type_id=TEST_FLAVOR.id,
    system_metadata={'image_os_distro': 'rhel'},
    host='host1',
    flavor=TEST_FLAVOR,
    task_state=None,
    vm_state=vm_states.STOPPED,
    power_state=power_state.SHUTDOWN,
)

# Raw glance-style image dict and its ImageMeta object form.
IMAGE1 = {
    'id': uuidsentinel.img_id,
    'name': 'image1',
    'size': 300,
    'container_format': 'bare',
    'disk_format': 'raw',
    'checksum': 'b518a8ba2b152b5607aceb5703fac072',
}
TEST_IMAGE1 = objects.image_meta.ImageMeta.from_dict(IMAGE1)
| {
"content_hash": "0eb636573b5b18f7b7318298ec1b9ba4",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 60,
"avg_line_length": 23.571428571428573,
"alnum_prop": 0.6676767676767676,
"repo_name": "klmitch/nova",
"id": "dedb6af7db1071c290947f461f364d0eb3c45634",
"size": "1599",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/powervm/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "851"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "44222"
},
{
"name": "Python",
"bytes": "22328409"
},
{
"name": "Shell",
"bytes": "29138"
},
{
"name": "Smarty",
"bytes": "405441"
}
],
"symlink_target": ""
} |
import vtk.qt

# Select the Qt binding before importing the real implementation:
# prefer PyQt4, fall back to PySide, otherwise fail loudly.
try:
    import PyQt4
    vtk.qt.PyQtImpl = "PyQt4"
except ImportError:
    try:
        import PySide
        vtk.qt.PyQtImpl = "PySide"
    except ImportError:
        raise ImportError("Cannot load either PyQt or PySide")

# Re-export everything from the real module; presumably this star import
# also brings PyQtImpl and QVTKRenderWidgetConeExample into scope for the
# __main__ block below -- TODO confirm against vtk.qt.QVTKRenderWindowInteractor.
from vtk.qt.QVTKRenderWindowInteractor import *

if __name__ == "__main__":
    print(PyQtImpl)
    QVTKRenderWidgetConeExample()
| {
"content_hash": "6655dd6b2c94bc6891bb1a910b1febba",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 62,
"avg_line_length": 21.823529411764707,
"alnum_prop": 0.6630727762803235,
"repo_name": "sumedhasingla/VTK",
"id": "dd5d1def585549ff85516cff003c1d100db7217d",
"size": "371",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Wrapping/Python/vtk/qt4/QVTKRenderWindowInteractor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "46626621"
},
{
"name": "C++",
"bytes": "68896127"
},
{
"name": "CMake",
"bytes": "1593338"
},
{
"name": "CSS",
"bytes": "186729"
},
{
"name": "Cuda",
"bytes": "29062"
},
{
"name": "D",
"bytes": "2081"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "GLSL",
"bytes": "216632"
},
{
"name": "Groff",
"bytes": "65394"
},
{
"name": "HTML",
"bytes": "292104"
},
{
"name": "Java",
"bytes": "147449"
},
{
"name": "JavaScript",
"bytes": "1131891"
},
{
"name": "Lex",
"bytes": "45341"
},
{
"name": "Objective-C",
"bytes": "22264"
},
{
"name": "Objective-C++",
"bytes": "190908"
},
{
"name": "Perl",
"bytes": "173168"
},
{
"name": "Prolog",
"bytes": "4406"
},
{
"name": "Python",
"bytes": "15749508"
},
{
"name": "Shell",
"bytes": "74255"
},
{
"name": "Slash",
"bytes": "1476"
},
{
"name": "Smarty",
"bytes": "1325"
},
{
"name": "Tcl",
"bytes": "1406812"
},
{
"name": "Yacc",
"bytes": "174577"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailadmin.edit_handlers import FieldPanel, MultiFieldPanel, PageChooserPanel
from urlparse import urlparse
class Redirect(models.Model):
    """A URL redirect: matches requests on *old_path* (optionally scoped
    to one site) and sends them to a wagtail Page or an arbitrary URL."""

    old_path = models.CharField(verbose_name=_("Redirect from"), max_length=255, unique=True, db_index=True)
    # None means the redirect applies to every site.
    site = models.ForeignKey('wagtailcore.Site', null=True, blank=True, related_name='redirects', db_index=True, editable=False)
    is_permanent = models.BooleanField(verbose_name=_("Permanent"), default=True, help_text=_("Recommended. Permanent redirects ensure search engines forget the old page (the 'Redirect from') and index the new page instead.") )
    # Exactly one of the two targets below is normally filled in.
    redirect_page = models.ForeignKey('wagtailcore.Page', verbose_name=_("Redirect to a page"), null=True, blank=True)
    redirect_link = models.URLField(verbose_name=_("Redirect to any URL"), blank=True)

    @property
    def title(self):
        # Display title for listings: just the source path.
        return self.old_path

    @property
    def link(self):
        """Resolved destination URL; a linked page wins over the raw URL."""
        if self.redirect_page:
            return self.redirect_page.url
        else:
            return self.redirect_link

    def get_is_permanent_display(self):
        """Human-readable redirect type: "permanent" or "temporary"."""
        if self.is_permanent:
            return "permanent"
        else:
            return "temporary"

    @classmethod
    def get_for_site(cls, site=None):
        """Redirects applicable to *site*: the site's own plus site-wide
        (site=None) entries; with no site given, all redirects."""
        if site:
            return cls.objects.filter(models.Q(site=site) | models.Q(site=None))
        else:
            return cls.objects.all()

    @staticmethod
    def normalise_path(url):
        """Canonicalise *url* so equivalent paths compare equal."""
        # Parse url
        url_parsed = urlparse(url)
        # Path must start with / but not end with /
        path = url_parsed[2]
        if not path.startswith('/'):
            path = '/' + path
        if path.endswith('/'):
            path = path[:-1]
        # Query string components must be sorted alphabetically
        query_string = url_parsed[4]
        query_string_components = query_string.split('&')
        query_string = '&'.join(sorted(query_string_components))
        # Add query string to path
        if query_string:
            path = path + '?' + query_string
        return path

    def clean(self):
        # Normalise old path before validation/save.
        self.old_path = Redirect.normalise_path(self.old_path)
# Wagtail admin edit-handler configuration, attached to the model after its
# class definition (upstream style).
Redirect.content_panels = [
    MultiFieldPanel([
        FieldPanel('old_path'),
        FieldPanel('is_permanent'),
        PageChooserPanel('redirect_page'),
        FieldPanel('redirect_link'),
    ])
]
| {
"content_hash": "663e81cb94cf2919275c0967cde70a93",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 227,
"avg_line_length": 33.04,
"alnum_prop": 0.6347861178369653,
"repo_name": "sahat/wagtail",
"id": "6440ad383ffb0ead2b65bb15839946251d9b0614",
"size": "2478",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "wagtail/wagtailredirects/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add 'author' and 'description' columns to the Presentation model.

    Both fields are backfilled with an empty string for existing rows;
    preserve_default=False means the default is used only for this
    migration and is not kept on the field afterwards.
    """
    dependencies = [
        ('presentations', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='presentation',
            name='author',
            field=models.CharField(default='', max_length=1024),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='presentation',
            name='description',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
    ]
| {
"content_hash": "38ac312dc995f1c9cdd20cd622395d64",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 64,
"avg_line_length": 24.88,
"alnum_prop": 0.5691318327974276,
"repo_name": "CIGIHub/django-presentations",
"id": "a36071f6232714f989dc57f54bb11c7e3edd9b0b",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "presentations/migrations/0002_auto_20150420_1934.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3318"
},
{
"name": "Python",
"bytes": "7465"
}
],
"symlink_target": ""
} |
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag import protocols
import ztag.test
class FtpBulletproofFtpd(Annotation):
    """Tags FTP banners advertising the BulletProof FTP server.

    BulletProof FTP is a Windows product, so a banner match also implies
    the Windows operating system in the global metadata.
    """
    protocol = protocols.FTP
    subprotocol = protocols.FTP.BANNER
    port = None
    # Matches the 220 greeting line emitted by BulletProof FTP (case-insensitive).
    impl_re = re.compile("^220[- ]BulletProof FTP Server ready", re.IGNORECASE)
    tests = {
        "FtpBulletproofFtpd_1": {
            "global_metadata": {"os": OperatingSystem.WINDOWS},
            "local_metadata": {"product": "BulletProof FTP"},
        }
    }
    def process(self, obj, meta):
        """Annotate *meta* with OS/product details when the banner matches."""
        if self.impl_re.search(obj["banner"]) is None:
            return meta
        meta.global_metadata.os = OperatingSystem.WINDOWS
        meta.local_metadata.product = "BulletProof FTP"
        return meta
""" Tests
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"220 BulletProof FTP Server ready ...\r\n"
"""
| {
"content_hash": "073ce75808e495d746ba0819bc3e7d7b",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 31.096153846153847,
"alnum_prop": 0.6147186147186147,
"repo_name": "zmap/ztag",
"id": "aa47c9d589038f81158f8a579afb1a969deb024c",
"size": "1617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ztag/annotations/FtpBulletproofFtpd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "604209"
}
],
"symlink_target": ""
} |
"""Support for SwitchBot curtains."""
from __future__ import annotations
import logging
from typing import Any
import switchbot
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_POSITION,
CoverDeviceClass,
CoverEntity,
CoverEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.restore_state import RestoreEntity
from .const import DOMAIN
from .coordinator import SwitchbotDataUpdateCoordinator
from .entity import SwitchbotEntity
# Initialize the logger
_LOGGER = logging.getLogger(__name__)
# PARALLEL_UPDATES = 0: Home Assistant convention — presumably places no
# limit on concurrent entity updates for this platform; confirm against
# the HA developer docs.
PARALLEL_UPDATES = 0
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up Switchbot curtain based on a config entry."""
    # The coordinator for this entry was stored by the integration's setup.
    coordinator: SwitchbotDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    entity = SwitchBotCurtainEntity(coordinator)
    async_add_entities([entity])
class SwitchBotCurtainEntity(SwitchbotEntity, CoverEntity, RestoreEntity):
    """Representation of a Switchbot.

    Cover entity for a SwitchBot Curtain. RestoreEntity is used to recover
    the last known position across Home Assistant restarts.
    """
    # Narrow the device type from the base entity to the curtain driver.
    _device: switchbot.SwitchbotCurtain
    _attr_device_class = CoverDeviceClass.CURTAIN
    _attr_supported_features = (
        CoverEntityFeature.OPEN
        | CoverEntityFeature.CLOSE
        | CoverEntityFeature.STOP
        | CoverEntityFeature.SET_POSITION
    )
    def __init__(self, coordinator: SwitchbotDataUpdateCoordinator) -> None:
        """Initialize the Switchbot."""
        super().__init__(coordinator)
        # Unknown until restored from the last state or updated by the device.
        self._attr_is_closed = None
    async def async_added_to_hass(self) -> None:
        """Run when entity about to be added."""
        await super().async_added_to_hass()
        last_state = await self.async_get_last_state()
        # Nothing to restore if no prior state or no recorded position.
        if not last_state or ATTR_CURRENT_POSITION not in last_state.attributes:
            return
        self._attr_current_cover_position = last_state.attributes.get(
            ATTR_CURRENT_POSITION
        )
        self._last_run_success = last_state.attributes.get("last_run_success")
        if self._attr_current_cover_position is not None:
            # Positions at or below 20% are treated as "closed".
            self._attr_is_closed = self._attr_current_cover_position <= 20
    async def async_open_cover(self, **kwargs: Any) -> None:
        """Open the curtain."""
        _LOGGER.debug("Switchbot to open curtain %s", self._address)
        self._last_run_success = bool(await self._device.open())
        self.async_write_ha_state()
    async def async_close_cover(self, **kwargs: Any) -> None:
        """Close the curtain."""
        _LOGGER.debug("Switchbot to close the curtain %s", self._address)
        self._last_run_success = bool(await self._device.close())
        self.async_write_ha_state()
    async def async_stop_cover(self, **kwargs: Any) -> None:
        """Stop the moving of this device."""
        _LOGGER.debug("Switchbot to stop %s", self._address)
        self._last_run_success = bool(await self._device.stop())
        self.async_write_ha_state()
    async def async_set_cover_position(self, **kwargs: Any) -> None:
        """Move the cover shutter to a specific position."""
        position = kwargs.get(ATTR_POSITION)
        _LOGGER.debug("Switchbot to move at %d %s", position, self._address)
        self._last_run_success = bool(await self._device.set_position(position))
        self.async_write_ha_state()
    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        self._attr_current_cover_position = self.data["data"]["position"]
        # Same 20% "closed" threshold as in async_added_to_hass.
        self._attr_is_closed = self.data["data"]["position"] <= 20
        # NOTE(review): "inMotion" is mapped to is_opening only; closing
        # motion is not distinguished from opening here.
        self._attr_is_opening = self.data["data"]["inMotion"]
        self.async_write_ha_state()
| {
"content_hash": "4c5bacd18b4e077e3d7ff8a29d5f6903",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 84,
"avg_line_length": 36.65384615384615,
"alnum_prop": 0.6775970619097587,
"repo_name": "mezz64/home-assistant",
"id": "696c9455f28bd5b2dbeb3c2607909f6568cd8fe3",
"size": "3812",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switchbot/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
"""Naming set for Korean"""
import copy
import basepart
class ComposerElementKorean(basepart.ComposerElementBase):
    """Korean name composer - inspired by 이강성 (Lee Kangsung), 『파이썬 3 바이블』 ("Python 3 Bible")"""
    # Full Hangul jamo inventory as (initial consonants, medial vowels,
    # final consonants); the leading space in the final set represents
    # "no final consonant".
    character = ( \
        tuple('ㄱㄲㄴㄷㄸㄹㅁㅂㅃㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎ'), \
        tuple('ㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣ'), \
        tuple(' ㄱㄲㄳㄴㄵㄶㄷㄹㄺㄻㄼㄽㄾㄿㅀㅁㅂㅄㅅㅆㅇㅈㅊㅋㅌㅍㅎ') \
    )
    # Reduced jamo set considered "recommended" for generated names.
    character_recommend = ( \
        tuple('ㄱㄴㄷㅁㅂㅅㅇㅈㅎ'), \
        tuple('ㅏㅓㅗㅜㅡㅣ'), \
        tuple(' ㄴㅇ') \
    )
    # Total number of composable syllables for each character set
    # (product of initial x medial x final counts).
    num_character_all = \
        len(character[0]) * \
        len(character[1]) * \
        len(character[2])
    num_character_recommend = \
        len(character_recommend[0]) * \
        len(character_recommend[1]) * \
        len(character_recommend[2])
    # Per-position jamo selections; class-level attributes shared via the
    # metaclass machinery (see __init__).
    initial = basepart.ListBase()
    medial = basepart.ListBase()
    final = basepart.ListBase()
    def __init__(self, initial=None, medial=None, final=None):
        # Intentionally empty: argument handling is performed by the
        # metaclass of ComposerElementBase.
        pass # See the metaclass
    def compose(self):
        """Compose the Korean name"""
        character = self.character
        list_original = [self.initial, self.medial, self.final]
        list_process = [basepart.ListBase(), basepart.ListBase(), basepart.ListBase()]
        ingredient = [basepart.ListBase(), basepart.ListBase(), basepart.ListBase()]
        # Check type and init list_process: explicit ListBase selections are
        # copied, None means "any jamo from the full set", anything else is
        # rejected.
        for (idx, elem) in enumerate(list_original):
            if isinstance(elem, basepart.ListBase):
                list_process[idx] = copy.deepcopy(elem)
            elif elem is None:
                list_process[idx] = basepart.IncludeList(character[idx])
            else:
                raise basepart.NamingLibException('Check composer input type')
        for (elem, characterset) in zip(list_process, (character[0], character[1], character[2])):
            # Change str to index
            elem.digitize(characterset)
            # Check index whether that is out of range
            elem.check_element_index(characterset)
        # Fill the ingredient
        for (idx, elem) in enumerate(list_process):
            ingredient[idx] = elem.choice(character[idx])
        # Unicode Hangul syllable composition:
        # U+AC00 + (initial * 21 + medial) * 28 + final
        result_int = 0xac00 + ((ingredient[0] * 21) + ingredient[1]) * 28 + ingredient[2]
        result_char = chr(result_int)
        self.result = result_char
        return result_char
| {
"content_hash": "cfa96614e17f431f4b1f93a4d9a9ea8f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 98,
"avg_line_length": 35.296875,
"alnum_prop": 0.601593625498008,
"repo_name": "pauis/name",
"id": "22f777acc96fe3c605219d03e3840eaa673df6a0",
"size": "2449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ko.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9817"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
import djorm_pgfulltext.fields
class Migration(migrations.Migration):
    """Add a PostgreSQL full-text 'search_index' vector column to the blog
    Post model (djorm_pgfulltext VectorField: indexed, nullable,
    non-editable, excluded from serialization)."""
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='search_index',
            field=djorm_pgfulltext.fields.VectorField(db_index=True, default='', editable=False, null=True, serialize=False),
        ),
    ]
| {
"content_hash": "2d3453a87dfca0d89383c80c97e74218",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 125,
"avg_line_length": 24.42105263157895,
"alnum_prop": 0.6293103448275862,
"repo_name": "evg-dev/pyblog",
"id": "5e80852d001e4e66940690bd316e481077dc71cb",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0002_post_search_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7266"
},
{
"name": "HTML",
"bytes": "13576"
},
{
"name": "JavaScript",
"bytes": "6159"
},
{
"name": "Python",
"bytes": "35319"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.servicefabric import ServiceFabricManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-servicefabric
# USAGE
python get_a_service.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch the 'myService' Service Fabric service resource and print it."""
    credential = DefaultAzureCredential()
    client = ServiceFabricManagementClient(
        credential=credential,
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    service = client.services.get(
        resource_group_name="resRg",
        cluster_name="myCluster",
        application_name="myApp",
        service_name="myService",
    )
    print(service)
# x-ms-original-file: specification/servicefabric/resource-manager/Microsoft.ServiceFabric/stable/2021-06-01/examples/ServiceGetOperation_example.json
# Run the sample only when this file is executed directly (not on import).
if __name__ == "__main__":
    main()
| {
"content_hash": "a6bc0a7e4539748a3102e7602814b689",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 150,
"avg_line_length": 33.65714285714286,
"alnum_prop": 0.7300509337860781,
"repo_name": "Azure/azure-sdk-for-python",
"id": "9021133d080cf7283739505b405bdde067180c74",
"size": "1646",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/servicefabric/azure-mgmt-servicefabric/generated_samples/get_a_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import datetime
import warnings
from django.conf import settings
from haystack.backends import BaseEngine
from haystack.constants import DJANGO_CT
from haystack.exceptions import MissingDependency
from haystack.utils import get_identifier, get_model_ct
# Backport support
from .elasticsearch_backend import (ElasticsearchSearchBackend,
ElasticsearchSearchQuery)
try:
import elasticsearch
if not ((2, 0, 0) <= elasticsearch.__version__ < (3, 0, 0)):
raise ImportError
from elasticsearch.helpers import bulk, scan
warnings.warn(
"ElasticSearch 2.x support deprecated, will be removed in 4.0",
DeprecationWarning,
)
except ImportError:
raise MissingDependency(
"The 'elasticsearch2' backend requires the \
installation of 'elasticsearch>=2.0.0,<3.0.0'. \
Please refer to the documentation."
)
class Elasticsearch2SearchBackend(ElasticsearchSearchBackend):
    """Haystack search backend targeting Elasticsearch 2.x.

    Overrides the base ES backend where the 2.x API differs: index clearing
    uses the scan + bulk-delete helpers, facets are expressed as
    aggregations, and similarity search uses the 2.x ``more_like_this``
    query.
    """
    def __init__(self, connection_alias, **connection_options):
        """Initialize the backend and reset the cached content field name."""
        super(Elasticsearch2SearchBackend, self).__init__(
            connection_alias, **connection_options)
        self.content_field_name = None
    def clear(self, models=None, commit=True):
        """
        Clears the backend of all documents/objects for a collection of models.

        With no models, the whole index is deleted and the cached setup
        state is reset; otherwise only documents belonging to the given
        models are bulk-deleted.

        :param models: List or tuple of models to clear.
        :param commit: Not used.
        """
        if models is not None:
            assert isinstance(models, (list, tuple))
        try:
            if models is None:
                self.conn.indices.delete(index=self.index_name, ignore=404)
                self.setup_complete = False
                self.existing_mapping = {}
                self.content_field_name = None
            else:
                models_to_delete = []
                for model in models:
                    models_to_delete.append("%s:%s" % (DJANGO_CT, get_model_ct(model)))
                # Delete using scroll API
                query = {
                    "query": {"query_string": {"query": " OR ".join(models_to_delete)}}
                }
                generator = scan(
                    self.conn,
                    query=query,
                    index=self.index_name,
                    **self._get_doc_type_option()
                )
                actions = (
                    {"_op_type": "delete", "_id": doc["_id"]} for doc in generator
                )
                bulk(
                    self.conn,
                    actions=actions,
                    index=self.index_name,
                    **self._get_doc_type_option()
                )
                self.conn.indices.refresh(index=self.index_name)
        except elasticsearch.TransportError as e:
            # Errors are swallowed (but logged) when silently_fail is set.
            if not self.silently_fail:
                raise
            if models is not None:
                self.log.error(
                    "Failed to clear Elasticsearch index of models '%s': %s",
                    ",".join(models_to_delete),
                    e,
                    exc_info=True,
                )
            else:
                self.log.error(
                    "Failed to clear Elasticsearch index: %s", e, exc_info=True
                )
    def build_search_kwargs(
        self,
        query_string,
        sort_by=None,
        start_offset=0,
        end_offset=None,
        fields="",
        highlight=False,
        facets=None,
        date_facets=None,
        query_facets=None,
        narrow_queries=None,
        spelling_query=None,
        within=None,
        dwithin=None,
        distance_point=None,
        models=None,
        limit_to_registered_models=None,
        result_class=None,
    ):
        """Build the ES 2.x request body.

        Delegates the common parts to the base class, then adds paging
        ("from"/"size"), facet/date-facet/query-facet aggregations, and
        narrow-query filters in 2.x syntax.
        """
        kwargs = super(Elasticsearch2SearchBackend, self).build_search_kwargs(
            query_string,
            sort_by,
            start_offset,
            end_offset,
            fields,
            highlight,
            spelling_query=spelling_query,
            within=within,
            dwithin=dwithin,
            distance_point=distance_point,
            models=models,
            limit_to_registered_models=limit_to_registered_models,
            result_class=result_class,
        )
        filters = []
        if start_offset is not None:
            kwargs["from"] = start_offset
        if end_offset is not None:
            kwargs["size"] = end_offset - start_offset
        if narrow_queries is None:
            narrow_queries = set()
        if facets is not None:
            # Plain field facets become "terms" aggregations; the original
            # facet kind is remembered in "meta" for _process_results.
            kwargs.setdefault("aggs", {})
            for facet_fieldname, extra_options in facets.items():
                facet_options = {
                    "meta": {"_type": "terms"},
                    "terms": {"field": facet_fieldname},
                }
                if "order" in extra_options:
                    facet_options["meta"]["order"] = extra_options.pop("order")
                # Special cases for options applied at the facet level (not the terms level).
                if extra_options.pop("global_scope", False):
                    # Renamed "global_scope" since "global" is a python keyword.
                    facet_options["global"] = True
                if "facet_filter" in extra_options:
                    facet_options["facet_filter"] = extra_options.pop("facet_filter")
                facet_options["terms"].update(extra_options)
                kwargs["aggs"][facet_fieldname] = facet_options
        if date_facets is not None:
            kwargs.setdefault("aggs", {})
            for facet_fieldname, value in date_facets.items():
                # Need to detect on gap_by & only add amount if it's more than one.
                interval = value.get("gap_by").lower()
                # Need to detect on amount (can't be applied on months or years).
                if value.get("gap_amount", 1) != 1 and interval not in (
                    "month",
                    "year",
                ):
                    # Just the first character is valid for use.
                    interval = "%s%s" % (value["gap_amount"], interval[:1])
                kwargs["aggs"][facet_fieldname] = {
                    "meta": {"_type": "date_histogram"},
                    "date_histogram": {"field": facet_fieldname, "interval": interval},
                    "aggs": {
                        facet_fieldname: {
                            "date_range": {
                                "field": facet_fieldname,
                                "ranges": [
                                    {
                                        "from": self._from_python(
                                            value.get("start_date")
                                        ),
                                        "to": self._from_python(value.get("end_date")),
                                    }
                                ],
                            }
                        }
                    },
                }
        if query_facets is not None:
            kwargs.setdefault("aggs", {})
            for facet_fieldname, value in query_facets:
                kwargs["aggs"][facet_fieldname] = {
                    "meta": {"_type": "query"},
                    "filter": {"query_string": {"query": value}},
                }
        for q in narrow_queries:
            filters.append({"query_string": {"query": q}})
        # If we need to filter, wrap the query in a "filtered" query.
        if filters:
            kwargs["query"] = {"filtered": {"query": kwargs.pop("query")}}
            filtered = kwargs["query"]["filtered"]
            # NOTE(review): kwargs["query"] was just rebuilt above with only
            # a "query" key, so this "filter" branch appears unreachable;
            # kept as-is from the upstream backport.
            if "filter" in filtered:
                if "bool" in filtered["filter"].keys():
                    another_filters = kwargs["query"]["filtered"]["filter"]["bool"][
                        "must"
                    ]
                else:
                    another_filters = [kwargs["query"]["filtered"]["filter"]]
            else:
                another_filters = filters
            # A single filter is attached directly; multiple filters are
            # combined under a bool "must".
            if len(another_filters) == 1:
                kwargs["query"]["filtered"]["filter"] = another_filters[0]
            else:
                kwargs["query"]["filtered"]["filter"] = {
                    "bool": {"must": another_filters}
                }
        return kwargs
    def more_like_this(
        self,
        model_instance,
        additional_query_string=None,
        start_offset=0,
        end_offset=None,
        models=None,
        limit_to_registered_models=None,
        result_class=None,
        **kwargs
    ):
        """Return documents similar to ``model_instance``.

        Builds a 2.x ``more_like_this`` query over the index's content
        field, optionally narrowed by ``additional_query_string`` and by
        the selected/registered models.
        """
        from haystack import connections
        if not self.setup_complete:
            self.setup()
        # Deferred models will have a different class ("RealClass_Deferred_fieldname")
        # which won't be in our registry:
        model_klass = model_instance._meta.concrete_model
        index = (
            connections[self.connection_alias]
            .get_unified_index()
            .get_index(model_klass)
        )
        field_name = index.get_content_field()
        params = {}
        if start_offset is not None:
            params["from_"] = start_offset
        if end_offset is not None:
            params["size"] = end_offset - start_offset
        doc_id = get_identifier(model_instance)
        try:
            # More like this Query
            # https://www.elastic.co/guide/en/elasticsearch/reference/2.2/query-dsl-mlt-query.html
            mlt_query = {
                "query": {
                    "more_like_this": {
                        "fields": [field_name],
                        "like": [{"_id": doc_id}],
                    }
                }
            }
            narrow_queries = []
            if additional_query_string and additional_query_string != "*:*":
                additional_filter = {
                    "query": {"query_string": {"query": additional_query_string}}
                }
                narrow_queries.append(additional_filter)
            if limit_to_registered_models is None:
                limit_to_registered_models = getattr(
                    settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
                )
            if models and len(models):
                model_choices = sorted(get_model_ct(model) for model in models)
            elif limit_to_registered_models:
                # Using narrow queries, limit the results to only models handled
                # with the current routers.
                model_choices = self.build_models_list()
            else:
                model_choices = []
            if len(model_choices) > 0:
                model_filter = {"terms": {DJANGO_CT: model_choices}}
                narrow_queries.append(model_filter)
            if len(narrow_queries) > 0:
                # Wrap the MLT query in a filtered query restricted to the
                # collected narrowing filters.
                mlt_query = {
                    "query": {
                        "filtered": {
                            "query": mlt_query["query"],
                            "filter": {"bool": {"must": list(narrow_queries)}},
                        }
                    }
                }
            search_kwargs = dict(self._get_doc_type_option())
            search_kwargs.update(params)
            raw_results = self.conn.search(
                body=mlt_query,
                index=self.index_name,
                _source=True,
                **search_kwargs
            )
        except elasticsearch.TransportError as e:
            if not self.silently_fail:
                raise
            self.log.error(
                "Failed to fetch More Like This from Elasticsearch for document '%s': %s",
                doc_id,
                e,
                exc_info=True,
            )
            # Fall back to an empty result set on silent failure.
            raw_results = {}
        return self._process_results(raw_results, result_class=result_class)
    def _process_results(
        self,
        raw_results,
        highlight=False,
        result_class=None,
        distance_point=None,
        geo_sort=False,
    ):
        """Post-process raw ES results, translating 2.x "aggregations"
        responses back into Haystack's facet structure
        (fields/dates/queries) using the "_type" hint stored in each
        aggregation's "meta" by build_search_kwargs."""
        results = super(Elasticsearch2SearchBackend, self)._process_results(
            raw_results, highlight, result_class, distance_point, geo_sort
        )
        facets = {}
        if "aggregations" in raw_results:
            facets = {"fields": {}, "dates": {}, "queries": {}}
            for facet_fieldname, facet_info in raw_results["aggregations"].items():
                facet_type = facet_info["meta"]["_type"]
                if facet_type == "terms":
                    facets["fields"][facet_fieldname] = [
                        (individual["key"], individual["doc_count"])
                        for individual in facet_info["buckets"]
                    ]
                    if "order" in facet_info["meta"]:
                        if facet_info["meta"]["order"] == "reverse_count":
                            srt = sorted(
                                facets["fields"][facet_fieldname], key=lambda x: x[1]
                            )
                            facets["fields"][facet_fieldname] = srt
                elif facet_type == "date_histogram":
                    # Elasticsearch provides UTC timestamps with an extra three
                    # decimals of precision, which datetime barfs on.
                    facets["dates"][facet_fieldname] = [
                        (
                            datetime.datetime.utcfromtimestamp(
                                individual["key"] / 1000
                            ),
                            individual["doc_count"],
                        )
                        for individual in facet_info["buckets"]
                    ]
                elif facet_type == "query":
                    facets["queries"][facet_fieldname] = facet_info["doc_count"]
            results["facets"] = facets
        return results
class Elasticsearch2SearchQuery(ElasticsearchSearchQuery):
    """Query class for ES 2.x; inherits all behaviour from the base query."""
    pass
class Elasticsearch2SearchEngine(BaseEngine):
    """Haystack engine binding the ES 2.x backend and query classes."""
    backend = Elasticsearch2SearchBackend
    query = Elasticsearch2SearchQuery
| {
"content_hash": "75042dd9ff480aae7344853ae56de448",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 98,
"avg_line_length": 35.83460559796438,
"alnum_prop": 0.4800113612156501,
"repo_name": "reviewboard/reviewboard",
"id": "5241e0901109018a9d0f6f8595893bacdc2ffa6b",
"size": "14083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/search/search_backends/haystack_backports/elasticsearch2_backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class FrameCapacity(object):
    """Swagger model for the 'frameCapacity' manager notification.

    NOTE: This class follows the structure produced by the swagger code
    generator: attribute types and JSON names are declared in
    ``swagger_types`` / ``attribute_map`` and values are exposed via
    properties.
    """
    def __init__(self):
        """Initialize an empty FrameCapacity notification.

        ``swagger_types``: attribute name -> attribute type.
        ``attribute_map``: attribute name -> JSON key in the API payload.
        """
        self.swagger_types = {
            'sys_time': 'datetime',
            'type': 'str'
        }
        self.attribute_map = {
            'sys_time': 'sysTime',
            'type': 'type'
        }
        self._sys_time = None
        self._type = None
    @property
    def sys_time(self):
        """
        Gets the sys_time of this FrameCapacity.
        Time of notification

        :return: The sys_time of this FrameCapacity.
        :rtype: datetime
        """
        return self._sys_time
    @sys_time.setter
    def sys_time(self, sys_time):
        """
        Sets the sys_time of this FrameCapacity.
        Time of notification

        :param sys_time: The sys_time of this FrameCapacity.
        :type: datetime
        """
        self._sys_time = sys_time
    @property
    def type(self):
        """
        Gets the type of this FrameCapacity.
        Notification type

        :return: The type of this FrameCapacity.
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """
        Sets the type of this FrameCapacity.
        Notification type

        :param type: The type of this FrameCapacity.
        :type: str
        :raises ValueError: if ``type`` is not an allowed notification type.
        """
        # NOTE: "pathAlert" appears twice in the generated list; kept
        # verbatim so the ValueError message stays identical.
        allowed_values = ["netStarted", "pathStateChanged", "pathAlert", "moteStateChanged", "joinFailed", "pingResponse", "invalidMIC", "dataPacketReceived", "ipPacketReceived", "packetSent", "cmdFinished", "configChanged", "configLoaded", "alarmOpened", "alarmClosed", "deviceHealthReport", "neighborHealthReport", "discoveryHealthReport", "rawMoteNotification", "serviceChanged", "apStateChanged", "managerStarted", "managerStopping", "optPhase", "pathAlert", "moteTrace", "frameCapacity", "apGpsSyncChanged"]
        if type not in allowed_values:
            raise ValueError(
                "Invalid value for `type`, must be one of {0}"
                .format(allowed_values)
            )
        self._type = type
    def to_dict(self):
        """
        Returns the model properties as a dict.

        Nested models and lists of models are converted recursively via
        their own ``to_dict``.
        """
        result = {}
        # dict.items() replaces six.iteritems here: identical behaviour on
        # both Python 2 and 3, and drops the third-party dependency from
        # this class.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Bug fix: the generated code accessed ``other.__dict__`` directly,
        which raised AttributeError when comparing against a non-model
        value (e.g. ``fc == 5``); ``__eq__`` should never raise for
        foreign types.
        """
        if not isinstance(other, FrameCapacity):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| {
"content_hash": "23e7622967edb0e153b7d8fdfd7666d3",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 512,
"avg_line_length": 30.16083916083916,
"alnum_prop": 0.5699049385578484,
"repo_name": "realms-team/solmanager",
"id": "f48e08deb1e2d3db2467ba00b93f23c1f187e31c",
"size": "4330",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "libs/smartmeshsdk-REL-1.3.0.1/libs/VManagerSDK/vmanager/models/frame_capacity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3408"
},
{
"name": "CSS",
"bytes": "1148"
},
{
"name": "HTML",
"bytes": "1568"
},
{
"name": "JavaScript",
"bytes": "1430296"
},
{
"name": "Makefile",
"bytes": "8195"
},
{
"name": "Python",
"bytes": "3428922"
},
{
"name": "Smarty",
"bytes": "5800"
}
],
"symlink_target": ""
} |
"""
Support for showing the date and the time.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.time_date/
"""
import logging
import homeassistant.util.dt as dt_util
from homeassistant.helpers.entity import Entity
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)
# Mapping from a config 'display_options' key to the sensor's friendly name.
OPTION_TYPES = {
    'time': 'Time',
    'date': 'Date',
    'date_time': 'Date & Time',
    'time_date': 'Time & Date',
    'beat': 'Time (beat)',
    'time_utc': 'Time (UTC)',
}
# Hour:minute format used for all time-of-day states.
TIME_STR_FORMAT = "%H:%M"
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Time and Date sensor."""
    # A configured timezone is required to render local time/date states.
    if hass.config.time_zone is None:
        _LOGGER.error("Timezone is not set in Home Assistant config")
        return False
    sensors = []
    for option in config['display_options']:
        if option in OPTION_TYPES:
            sensors.append(TimeDateSensor(option))
        else:
            _LOGGER.error('Option type: "%s" does not exist', option)
    add_devices(sensors)
# pylint: disable=too-few-public-methods
class TimeDateSensor(Entity):
    """Implementation of a Time and Date sensor."""

    def __init__(self, option_type):
        """Initialize the sensor for one of the OPTION_TYPES keys."""
        self._name = OPTION_TYPES[option_type]
        self.type = option_type
        self._state = None
        self.update()

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        if "date" in self.type and "time" in self.type:
            return "mdi:calendar-clock"
        elif "date" in self.type:
            return "mdi:calendar"
        else:
            return "mdi:clock"

    def update(self):
        """Get the latest data and updates the states."""
        time_date = dt_util.utcnow()
        time = dt_util.as_local(time_date).strftime(TIME_STR_FORMAT)
        time_utc = time_date.strftime(TIME_STR_FORMAT)
        date = dt_util.as_local(time_date).date().isoformat()

        # Calculate the beat (Swatch Internet Time) time without date.
        # Beats are the seconds elapsed since midnight in UTC+1 (BMT)
        # divided by 86.4; the valid range is [0, 1000).
        hours, minutes, seconds = time_date.strftime('%H:%M:%S').split(':')
        # Bug fix: wrap the +1 hour BMT offset at midnight. Previously any
        # UTC hour of 23 produced beats >= 1000 instead of wrapping to 0.
        bmt_hours = (int(hours) + 1) % 24
        beat = (int(seconds) + (int(minutes) * 60) + (bmt_hours * 3600)) / 86.4

        if self.type == 'time':
            self._state = time
        elif self.type == 'date':
            self._state = date
        elif self.type == 'date_time':
            self._state = date + ', ' + time
        elif self.type == 'time_date':
            self._state = time + ', ' + date
        elif self.type == 'time_utc':
            self._state = time_utc
        elif self.type == 'beat':
            self._state = '{0:.2f}'.format(beat)
| {
"content_hash": "07cd7feb25e930884a84070a1a1bb703",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 75,
"avg_line_length": 30.726315789473684,
"alnum_prop": 0.5707434052757794,
"repo_name": "Zyell/home-assistant",
"id": "e6e78fe8ff9094f9a6aca33cff20ee495ca95f1a",
"size": "2919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/sensor/time_date.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "798938"
},
{
"name": "Python",
"bytes": "771451"
},
{
"name": "Shell",
"bytes": "5097"
}
],
"symlink_target": ""
} |
import collections
import json
import os
import uuid
import mock
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common import identifier
from heat.common import template_format
from heat.common import urlfetch
from heat.engine import attributes
from heat.engine import environment
from heat.engine import properties
from heat.engine import resource
from heat.engine import resources
from heat.engine.resources import template_resource
from heat.engine import rsrc_defn
from heat.engine import stack as parser
from heat.engine import support
from heat.engine import template
from heat.tests import common
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
empty_template = {"HeatTemplateFormatVersion": "2012-12-12"}
class MyCloudResource(generic_rsrc.GenericResource):
    """Stand-in resource class registered under 'myCloud::ResourceType'.

    Used by the registry-mapping tests to verify that OS::* names can be
    remapped to a cloud-specific implementation.
    """
    pass
class ProviderTemplateTest(common.HeatTestCase):
    def setUp(self):
        """Register the fake 'myCloud::ResourceType' class before each test."""
        super(ProviderTemplateTest, self).setUp()
        resource._register_class('myCloud::ResourceType',
                                 MyCloudResource)
def test_get_os_empty_registry(self):
# assertion: with an empty environment we get the correct
# default class.
env_str = {'resource_registry': {}}
env = environment.Environment(env_str)
cls = env.get_class('GenericResourceType', 'fred')
self.assertEqual(generic_rsrc.GenericResource, cls)
def test_get_mine_global_map(self):
# assertion: with a global rule we get the "mycloud" class.
env_str = {'resource_registry': {"OS::*": "myCloud::*"}}
env = environment.Environment(env_str)
cls = env.get_class('OS::ResourceType', 'fred')
self.assertEqual(MyCloudResource, cls)
def test_get_mine_type_map(self):
# assertion: with a global rule we get the "mycloud" class.
env_str = {'resource_registry': {
"OS::ResourceType": "myCloud::ResourceType"}}
env = environment.Environment(env_str)
cls = env.get_class('OS::ResourceType', 'fred')
self.assertEqual(MyCloudResource, cls)
def test_get_mine_resource_map(self):
# assertion: with a global rule we get the "mycloud" class.
env_str = {'resource_registry': {'resources': {'fred': {
"OS::ResourceType": "myCloud::ResourceType"}}}}
env = environment.Environment(env_str)
cls = env.get_class('OS::ResourceType', 'fred')
self.assertEqual(MyCloudResource, cls)
def test_get_os_no_match(self):
# assertion: make sure 'fred' doesn't match 'jerry'.
env_str = {'resource_registry': {'resources': {'jerry': {
"OS::ResourceType": "myCloud::ResourceType"}}}}
env = environment.Environment(env_str)
cls = env.get_class('GenericResourceType', 'fred')
self.assertEqual(generic_rsrc.GenericResource, cls)
def test_to_parameters(self):
"""Tests property conversion to parameter values."""
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Foo': {'Type': 'String'},
'AList': {'Type': 'CommaDelimitedList'},
'MemList': {'Type': 'CommaDelimitedList'},
'ListEmpty': {'Type': 'CommaDelimitedList'},
'ANum': {'Type': 'Number'},
'AMap': {'Type': 'Json'},
},
'Outputs': {
'Foo': {'Value': 'bar'},
},
}
files = {'test_resource.template': json.dumps(provider)}
class DummyResource(generic_rsrc.GenericResource):
attributes_schema = {"Foo": attributes.Schema("A test attribute")}
properties_schema = {
"Foo": {"Type": "String"},
"AList": {"Type": "List"},
"MemList": {"Type": "List"},
"ListEmpty": {"Type": "List"},
"ANum": {"Type": "Number"},
"AMap": {"Type": "Map"}
}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, files=files,
env=env),
stack_id=str(uuid.uuid4()))
map_prop_val = {
"key1": "val1",
"key2": ["lval1", "lval2", "lval3"],
"key3": {
"key4": 4,
"key5": False
}
}
prop_vals = {
"Foo": "Bar",
"AList": ["one", "two", "three"],
"MemList": [collections.OrderedDict([
('key', 'name'),
('value', 'three'),
]), collections.OrderedDict([
('key', 'name'),
('value', 'four'),
])],
"ListEmpty": [],
"ANum": 5,
"AMap": map_prop_val,
}
definition = rsrc_defn.ResourceDefinition('test_t_res',
'DummyResource',
prop_vals)
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
temp_res.validate()
converted_params = temp_res.child_params()
self.assertTrue(converted_params)
for key in DummyResource.properties_schema:
self.assertIn(key, converted_params)
# verify String conversion
self.assertEqual("Bar", converted_params.get("Foo"))
# verify List conversion
self.assertEqual("one,two,three", converted_params.get("AList"))
# verify Member List conversion
mem_exp = ('.member.0.key=name,'
'.member.0.value=three,'
'.member.1.key=name,'
'.member.1.value=four')
self.assertEqual(sorted(mem_exp.split(',')),
sorted(converted_params.get("MemList").split(',')))
# verify Number conversion
self.assertEqual(5, converted_params.get("ANum"))
# verify Map conversion
self.assertEqual(map_prop_val, converted_params.get("AMap"))
with mock.patch.object(properties.Properties,
'get_user_value') as m_get:
m_get.side_effect = ValueError('boom')
# If the property doesn't exist on INIT, return default value
temp_res.action = temp_res.INIT
converted_params = temp_res.child_params()
for key in DummyResource.properties_schema:
self.assertIn(key, converted_params)
self.assertEqual({}, converted_params['AMap'])
self.assertEqual(0, converted_params['ANum'])
# If the property doesn't exist past INIT, then error out
temp_res.action = temp_res.CREATE
self.assertRaises(ValueError, temp_res.child_params)
def test_attributes_extra(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Outputs': {
'Foo': {'Value': 'bar'},
'Blarg': {'Value': 'wibble'},
},
}
files = {'test_resource.template': json.dumps(provider)}
class DummyResource(generic_rsrc.GenericResource):
attributes_schema = {"Foo": attributes.Schema("A test attribute")}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, files=files,
env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource")
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertIsNone(temp_res.validate())
def test_attributes_missing_based_on_class(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Outputs': {
'Blarg': {'Value': 'wibble'},
},
}
files = {'test_resource.template': json.dumps(provider)}
class DummyResource(generic_rsrc.GenericResource):
attributes_schema = {"Foo": attributes.Schema("A test attribute")}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, files=files,
env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource")
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertRaises(exception.StackValidationFailed,
temp_res.validate)
def test_attributes_missing_no_class(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Outputs': {
'Blarg': {'Value': 'wibble'},
},
}
files = {'test_resource.template': json.dumps(provider)}
env = environment.Environment()
env.load({'resource_registry':
{'DummyResource2': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, files=files,
env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource2")
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
nested = mock.Mock()
nested.outputs = {'Blarg': {'Value': 'fluffy'}}
temp_res._nested = nested
self.assertRaises(exception.InvalidTemplateAttribute,
temp_res.FnGetAtt, 'Foo')
def test_attributes_not_parsable(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Outputs': {
'Foo': {'Value': 'bar'},
},
}
files = {'test_resource.template': json.dumps(provider)}
class DummyResource(generic_rsrc.GenericResource):
support_status = support.SupportStatus()
properties_schema = {}
attributes_schema = {"Foo": attributes.Schema(
"A test attribute")}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, files=files,
env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource")
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertIsNone(temp_res.validate())
nested = mock.Mock()
nested.outputs = {'Foo': {'Value': 'not-this',
'error_msg': 'it is all bad'}}
nested.output.return_value = None
temp_res._nested = nested
self.assertRaises(exception.InvalidTemplateAttribute,
temp_res.FnGetAtt, 'Foo')
def test_properties_normal(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Foo': {'Type': 'String'},
'Blarg': {'Type': 'String', 'Default': 'wibble'},
},
}
files = {'test_resource.template': json.dumps(provider)}
env = environment.Environment()
env.load({'resource_registry':
{'ResourceWithRequiredPropsAndEmptyAttrs':
'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, files=files,
env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition(
'test_t_res',
"ResourceWithRequiredPropsAndEmptyAttrs",
{"Foo": "bar"})
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertIsNone(temp_res.validate())
def test_properties_missing(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Blarg': {'Type': 'String', 'Default': 'wibble'},
},
}
files = {'test_resource.template': json.dumps(provider)}
env = environment.Environment()
env.load({'resource_registry':
{'ResourceWithRequiredPropsAndEmptyAttrs':
'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, files=files,
env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition(
'test_t_res',
"ResourceWithRequiredPropsAndEmptyAttrs")
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertRaises(exception.StackValidationFailed,
temp_res.validate)
def test_properties_extra_required(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Blarg': {'Type': 'String'},
},
}
files = {'test_resource.template': json.dumps(provider)}
class DummyResource(object):
support_status = support.SupportStatus()
properties_schema = {}
attributes_schema = {}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, files=files,
env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource",
{"Blarg": "wibble"})
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertRaises(exception.StackValidationFailed,
temp_res.validate)
def test_properties_type_mismatch(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Foo': {'Type': 'String'},
},
}
files = {'test_resource.template': json.dumps(provider)}
class DummyResource(generic_rsrc.GenericResource):
support_status = support.SupportStatus()
properties_schema = {"Foo":
properties.Schema(properties.Schema.MAP)}
attributes_schema = {}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(
{'HeatTemplateFormatVersion': '2012-12-12'},
files=files, env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource",
{"Foo": "bar"})
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
ex = self.assertRaises(exception.StackValidationFailed,
temp_res.validate)
self.assertEqual("Property Foo type mismatch between facade "
"DummyResource (Map) and provider (String)",
six.text_type(ex))
def test_properties_list_with_none(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Foo': {'Type': "CommaDelimitedList"},
},
}
files = {'test_resource.template': json.dumps(provider)}
class DummyResource(generic_rsrc.GenericResource):
support_status = support.SupportStatus()
properties_schema = {"Foo":
properties.Schema(properties.Schema.LIST)}
attributes_schema = {}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(
{'HeatTemplateFormatVersion': '2012-12-12'},
files=files, env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource",
{"Foo": [None,
'test', None]})
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertIsNone(temp_res.validate())
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource",
{"Foo": [None,
None, None]})
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertIsNone(temp_res.validate())
def test_properties_type_match(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Length': {'Type': 'Number'},
},
}
files = {'test_resource.template': json.dumps(provider)}
class DummyResource(generic_rsrc.GenericResource):
support_status = support.SupportStatus()
properties_schema = {"Length":
properties.Schema(properties.Schema.INTEGER)}
attributes_schema = {}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(
{'HeatTemplateFormatVersion': '2012-12-12'},
files=files, env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource",
{"Length": 10})
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertIsNone(temp_res.validate())
def test_boolean_type_provider(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Foo': {'Type': 'Boolean'},
},
}
files = {'test_resource.template': json.dumps(provider)}
class DummyResource(generic_rsrc.GenericResource):
support_status = support.SupportStatus()
properties_schema = {"Foo":
properties.Schema(properties.Schema.BOOLEAN)}
attributes_schema = {}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(
{'HeatTemplateFormatVersion': '2012-12-12'},
files=files, env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource",
{"Foo": "False"})
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertIsNone(temp_res.validate())
def test_resource_info_general(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Foo': {'Type': 'Boolean'},
},
}
files = {'test_resource.template': json.dumps(provider),
'foo.template': json.dumps(provider)}
class DummyResource(generic_rsrc.GenericResource):
properties_schema = {"Foo":
properties.Schema(properties.Schema.BOOLEAN)}
attributes_schema = {}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template',
'resources': {'foo': 'foo.template'}}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(
{'HeatTemplateFormatVersion': '2012-12-12'},
files=files, env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource",
{"Foo": "False"})
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
self.assertEqual('test_resource.template',
temp_res.template_name)
def test_resource_info_special(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Foo': {'Type': 'Boolean'},
},
}
files = {'test_resource.template': json.dumps(provider),
'foo.template': json.dumps(provider)}
class DummyResource(object):
support_status = support.SupportStatus()
properties_schema = {"Foo":
properties.Schema(properties.Schema.BOOLEAN)}
attributes_schema = {}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template',
'resources': {'foo': {'DummyResource': 'foo.template'}}}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(
{'HeatTemplateFormatVersion': '2012-12-12'},
files=files, env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('foo',
'DummyResource',
{"Foo": "False"})
temp_res = template_resource.TemplateResource('foo',
definition, stack)
self.assertEqual('foo.template',
temp_res.template_name)
    def test_get_error_for_invalid_template_name(self):
        # A registry value matching {.yaml|.template} is treated as a
        # provider template; resolving the class must surface
        # TemplateNotFound when the mapped file cannot be fetched.
        env_str = {'resource_registry': {'resources': {'fred': {
            "OS::ResourceType": "some_magic.yaml"}}}}
        env = environment.Environment(env_str)
        ex = self.assertRaises(exception.TemplateNotFound, env.get_class,
                               'OS::ResourceType', 'fred')
        self.assertIn('Could not fetch remote template "some_magic.yaml"',
                      six.text_type(ex))
def test_metadata_update_called(self):
provider = {
'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {
'Foo': {'Type': 'Boolean'},
},
}
files = {'test_resource.template': json.dumps(provider)}
class DummyResource(object):
support_status = support.SupportStatus()
properties_schema = {"Foo":
properties.Schema(properties.Schema.BOOLEAN)}
attributes_schema = {}
env = environment.Environment()
resource._register_class('DummyResource', DummyResource)
env.load({'resource_registry':
{'DummyResource': 'test_resource.template'}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(
{'HeatTemplateFormatVersion': '2012-12-12'},
files=files, env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
"DummyResource",
{"Foo": "False"})
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
temp_res.metadata_set = mock.Mock()
temp_res.metadata_update()
temp_res.metadata_set.assert_called_once_with({})
def test_get_template_resource_class(self):
test_templ_name = 'file:///etc/heatr/frodo.yaml'
minimal_temp = json.dumps({'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {},
'Resources': {}})
self.m.StubOutWithMock(urlfetch, "get")
urlfetch.get(test_templ_name,
allowed_schemes=('file',)).AndReturn(minimal_temp)
self.m.ReplayAll()
env_str = {'resource_registry': {'resources': {'fred': {
"OS::ResourceType": test_templ_name}}}}
env = environment.Environment(env_str)
cls = env.get_class('OS::ResourceType', 'fred')
self.assertNotEqual(template_resource.TemplateResource, cls)
self.assertTrue(issubclass(cls, template_resource.TemplateResource))
self.assertTrue(hasattr(cls, "properties_schema"))
self.assertTrue(hasattr(cls, "attributes_schema"))
self.m.VerifyAll()
def test_template_as_resource(self):
"""Test that resulting resource has the right prop and attrib schema.
Note that this test requires the Wordpress_Single_Instance.yaml
template in the templates directory since we want to test using a
non-trivial template.
"""
test_templ_name = "WordPress_Single_Instance.yaml"
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates', test_templ_name)
# check if its in the directory list vs. exists to work around
# case-insensitive file systems
self.assertIn(test_templ_name, os.listdir(os.path.dirname(path)))
with open(path) as test_templ_file:
test_templ = test_templ_file.read()
self.assertTrue(test_templ, "Empty test template")
self.m.StubOutWithMock(urlfetch, "get")
urlfetch.get(test_templ_name,
allowed_schemes=('file',)
).AndRaise(urlfetch.URLFetchError(
_('Failed to retrieve template')))
urlfetch.get(test_templ_name,
allowed_schemes=('http', 'https')).AndReturn(test_templ)
parsed_test_templ = template_format.parse(test_templ)
self.m.ReplayAll()
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template),
stack_id=str(uuid.uuid4()))
properties = {
"KeyName": "mykeyname",
"DBName": "wordpress1",
"DBUsername": "wpdbuser",
"DBPassword": "wpdbpass",
"DBRootPassword": "wpdbrootpass",
"LinuxDistribution": "U10"
}
definition = rsrc_defn.ResourceDefinition("test_templ_resource",
test_templ_name,
properties)
templ_resource = resource.Resource("test_templ_resource", definition,
stack)
self.m.VerifyAll()
self.assertIsInstance(templ_resource,
template_resource.TemplateResource)
for prop in parsed_test_templ.get("Parameters", {}):
self.assertIn(prop, templ_resource.properties)
for attrib in parsed_test_templ.get("Outputs", {}):
self.assertIn(attrib, templ_resource.attributes)
for k, v in properties.items():
self.assertEqual(v, templ_resource.properties[k])
self.assertEqual(
{'WordPress_Single_Instance.yaml':
'WordPress_Single_Instance.yaml', 'resources': {}},
stack.env.user_env_as_dict()["resource_registry"])
self.assertNotIn('WordPress_Single_Instance.yaml',
resources.global_env().registry._registry)
    def test_persisted_unregistered_provider_templates(self):
        """Test that templates are registered correctly.

        Test that templates persisted in the database prior to
        https://review.openstack.org/#/c/79953/1 are registered correctly.
        """
        # Registry entries whose value is None (as persisted by older
        # versions) must be tolerated when the environment is rebuilt.
        env = {'resource_registry': {'http://example.com/test.template': None,
                                     'resources': {}}}
        # A KeyError will be thrown prior to this fix.
        environment.Environment(env=env)
def test_system_template_retrieve_by_file(self):
# make sure that a TemplateResource defined in the global environment
# can be created and the template retrieved using the "file:"
# scheme.
g_env = resources.global_env()
test_templ_name = 'file:///etc/heatr/frodo.yaml'
g_env.load({'resource_registry':
{'Test::Frodo': test_templ_name}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template),
stack_id=str(uuid.uuid4()))
minimal_temp = json.dumps({'HeatTemplateFormatVersion': '2012-12-12',
'Parameters': {},
'Resources': {}})
self.m.StubOutWithMock(urlfetch, "get")
urlfetch.get(test_templ_name,
allowed_schemes=('http', 'https',
'file')).AndReturn(minimal_temp)
self.m.ReplayAll()
definition = rsrc_defn.ResourceDefinition('test_t_res',
'Test::Frodo')
temp_res = template_resource.TemplateResource('test_t_res',
definition,
stack)
self.assertIsNone(temp_res.validate())
self.m.VerifyAll()
def test_user_template_not_retrieved_by_file(self):
# make sure that a TemplateResource defined in the user environment
# can NOT be retrieved using the "file:" scheme, validation should fail
env = environment.Environment()
test_templ_name = 'file:///etc/heatr/flippy.yaml'
env.load({'resource_registry':
{'Test::Flippy': test_templ_name}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, env=env),
stack_id=str(uuid.uuid4()))
definition = rsrc_defn.ResourceDefinition('test_t_res',
'Test::Flippy')
temp_res = template_resource.TemplateResource('test_t_res',
definition,
stack)
self.assertRaises(exception.StackValidationFailed, temp_res.validate)
def test_system_template_retrieve_fail(self):
# make sure that a TemplateResource defined in the global environment
# fails gracefully if the template file specified is inaccessible
# we should be able to create the TemplateResource object, but
# validation should fail, when the second attempt to access it is
# made in validate()
g_env = resources.global_env()
test_templ_name = 'file:///etc/heatr/frodo.yaml'
g_env.load({'resource_registry':
{'Test::Frodo': test_templ_name}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template),
stack_id=str(uuid.uuid4()))
self.m.StubOutWithMock(urlfetch, "get")
urlfetch.get(test_templ_name,
allowed_schemes=('http', 'https',
'file')
).AndRaise(urlfetch.URLFetchError(
_('Failed to retrieve template')))
self.m.ReplayAll()
definition = rsrc_defn.ResourceDefinition('test_t_res',
'Test::Frodo')
temp_res = template_resource.TemplateResource('test_t_res',
definition,
stack)
self.assertRaises(exception.StackValidationFailed, temp_res.validate)
self.m.VerifyAll()
def test_user_template_retrieve_fail(self):
# make sure that a TemplateResource defined in the user environment
# fails gracefully if the template file specified is inaccessible
# we should be able to create the TemplateResource object, but
# validation should fail, when the second attempt to access it is
# made in validate()
env = environment.Environment()
test_templ_name = 'http://heatr/noexist.yaml'
env.load({'resource_registry':
{'Test::Flippy': test_templ_name}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, env=env),
stack_id=str(uuid.uuid4()))
self.m.StubOutWithMock(urlfetch, "get")
urlfetch.get(test_templ_name,
allowed_schemes=('http', 'https')
).AndRaise(urlfetch.URLFetchError(
_('Failed to retrieve template')))
self.m.ReplayAll()
definition = rsrc_defn.ResourceDefinition('test_t_res',
'Test::Flippy')
temp_res = template_resource.TemplateResource('test_t_res',
definition,
stack)
self.assertRaises(exception.StackValidationFailed, temp_res.validate)
self.m.VerifyAll()
def test_user_template_retrieve_fail_ext(self):
# make sure that a TemplateResource defined in the user environment
# fails gracefully if the template file is the wrong extension
# we should be able to create the TemplateResource object, but
# validation should fail, when the second attempt to access it is
# made in validate()
env = environment.Environment()
test_templ_name = 'http://heatr/letter_to_granny.docx'
env.load({'resource_registry':
{'Test::Flippy': test_templ_name}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, env=env),
stack_id=str(uuid.uuid4()))
self.m.ReplayAll()
definition = rsrc_defn.ResourceDefinition('test_t_res',
'Test::Flippy')
temp_res = template_resource.TemplateResource('test_t_res',
definition,
stack)
self.assertRaises(exception.StackValidationFailed, temp_res.validate)
self.m.VerifyAll()
def test_incorrect_template_provided_with_url(self):
wrong_template = '''
<head prefix="og: http://ogp.me/ns# fb: http://ogp.me/ns/fb#
'''
env = environment.Environment()
test_templ_name = 'http://heatr/bad_tmpl.yaml'
env.load({'resource_registry':
{'Test::Tmpl': test_templ_name}})
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(empty_template, env=env),
stack_id=str(uuid.uuid4()))
self.m.StubOutWithMock(urlfetch, "get")
urlfetch.get(test_templ_name,
allowed_schemes=('http', 'https')
).AndReturn(wrong_template)
self.m.ReplayAll()
definition = rsrc_defn.ResourceDefinition('test_t_res',
'Test::Tmpl')
temp_res = template_resource.TemplateResource('test_t_res',
definition,
stack)
err = self.assertRaises(exception.StackValidationFailed,
temp_res.validate)
self.assertIn('Error parsing template: ', six.text_type(err))
self.m.VerifyAll()
class TemplateDataTest(common.HeatTestCase):
    """Tests for TemplateResource.template_data() when the template file
    referenced by the registry is absent from the stack's files dict."""

    def setUp(self):
        # The files dict is deliberately empty: the registry maps the
        # resource type to 'test_resource.template', which therefore
        # cannot be found at template_data() time.
        super(TemplateDataTest, self).setUp()
        files = {}
        self.ctx = utils.dummy_context()
        env = environment.Environment()
        env.load({'resource_registry':
                  {'ResourceWithRequiredPropsAndEmptyAttrs':
                   'test_resource.template'}})
        self.stack = parser.Stack(self.ctx, 'test_stack',
                                  template.Template(empty_template,
                                                    files=files,
                                                    env=env),
                                  stack_id=str(uuid.uuid4()))
        self.defn = rsrc_defn.ResourceDefinition(
            'test_t_res',
            "ResourceWithRequiredPropsAndEmptyAttrs",
            {"Foo": "bar"})
        self.res = template_resource.TemplateResource('test_t_res',
                                                      self.defn, self.stack)

    def test_template_data_in_update_without_template_file(self):
        # During UPDATE the resource already exists, so a missing
        # template file must propagate TemplateNotFound.
        self.res.action = self.res.UPDATE
        self.res.nested = mock.MagicMock()
        self.res.get_template_file = mock.Mock(
            side_effect=exception.TemplateNotFound(
                message='test_resource.template'))
        self.assertRaises(exception.TemplateNotFound, self.res.template_data)

    def test_template_data_in_create_without_template_file(self):
        # During CREATE a missing template file degrades to an empty
        # ('{}') template instead of raising.
        self.res.action = self.res.CREATE
        self.res.nested = mock.MagicMock()
        self.res.get_template_file = mock.Mock(
            side_effect=exception.TemplateNotFound(
                message='test_resource.template'))
        self.assertEqual('{}', self.res.template_data())
class TemplateResourceCrudTest(common.HeatTestCase):
    """CRUD lifecycle tests for TemplateResource.

    Checks that handle_create/handle_adopt/handle_update delegate to the
    corresponding *_with_template helpers with the facade's properties,
    and that handle_delete issues a delete_stack RPC for the nested stack.
    """

    # Minimal provider template: one required ('Foo') and one defaulted
    # ('Blarg') parameter.
    provider = {
        'HeatTemplateFormatVersion': '2012-12-12',
        'Parameters': {
            'Foo': {'Type': 'String'},
            'Blarg': {'Type': 'String', 'Default': 'wibble'},
        },
    }

    def setUp(self):
        super(TemplateResourceCrudTest, self).setUp()
        files = {'test_resource.template': json.dumps(self.provider)}
        self.ctx = utils.dummy_context()
        env = environment.Environment()
        env.load({'resource_registry':
                  {'ResourceWithRequiredPropsAndEmptyAttrs':
                   'test_resource.template'}})
        self.stack = parser.Stack(self.ctx, 'test_stack',
                                  template.Template(empty_template,
                                                    files=files,
                                                    env=env),
                                  stack_id=str(uuid.uuid4()))
        self.defn = rsrc_defn.ResourceDefinition(
            'test_t_res',
            "ResourceWithRequiredPropsAndEmptyAttrs",
            {"Foo": "bar"})
        self.res = template_resource.TemplateResource('test_t_res',
                                                      self.defn, self.stack)
        self.assertIsNone(self.res.validate())

    def test_handle_create(self):
        self.res.create_with_template = mock.Mock(return_value=None)
        self.res.handle_create()
        self.res.create_with_template.assert_called_once_with(
            self.provider, {'Foo': 'bar'})

    def test_handle_adopt(self):
        self.res.create_with_template = mock.Mock(return_value=None)
        self.res.handle_adopt(resource_data={'resource_id': 'fred'})
        self.res.create_with_template.assert_called_once_with(
            self.provider, {'Foo': 'bar'},
            adopt_data={'resource_id': 'fred'})

    def test_handle_update(self):
        self.res.update_with_template = mock.Mock(return_value=None)
        self.res.handle_update(self.defn, None, None)
        self.res.update_with_template.assert_called_once_with(
            self.provider, {'Foo': 'bar'})

    def test_handle_delete(self):
        # handle_delete must target the nested stack's identifier, built
        # from the tenant, the physical resource name and the nested
        # stack's resource_id.
        self.res.rpc_client = mock.MagicMock()
        self.res.id = 55
        self.res.uuid = six.text_type(uuid.uuid4())
        self.res.resource_id = six.text_type(uuid.uuid4())
        self.res.action = self.res.CREATE
        self.res.nested = mock.MagicMock()
        ident = identifier.HeatIdentifier(self.ctx.tenant_id,
                                          self.res.physical_resource_name(),
                                          self.res.resource_id)
        self.res.nested().identifier.return_value = ident
        self.res.handle_delete()
        rpcc = self.res.rpc_client.return_value
        rpcc.delete_stack.assert_called_once_with(
            self.ctx,
            self.res.nested().identifier())
| {
"content_hash": "755c1e34281f550ac0e5ae26672819ef",
"timestamp": "",
"source": "github",
"line_count": 1009,
"max_line_length": 79,
"avg_line_length": 44.41724479682854,
"alnum_prop": 0.5214539125778165,
"repo_name": "maestro-hybrid-cloud/heat",
"id": "91cbe91b1acb2e8635ebaa6254b50367c1957122",
"size": "45392",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heat/tests/test_provider_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6954236"
},
{
"name": "Shell",
"bytes": "33503"
}
],
"symlink_target": ""
} |
"""
Implements some utility functionality.
"""
from datafinder.persistence.adapters.sftp import constants
__version__ = "$Revision-Id:$"
class ItemIdentifierMapper(object):
    """ Utility class that maps between logical item identifiers and
    their persistence (SFTP path) counterparts. """

    def __init__(self, basePath):
        """
        @param basePath: Base path on the SFTP server.
        @type basePath: C{unicode}
        """
        self._basePath = basePath

    def determinePeristenceId(self, identifier):
        """ Transforms the logical identifier to the persistence identifier.

        @param identifier: The logical ID of an item.
        @type identifier: C{unicode}

        @return: Path on the SFTP server, already encoded for use with
            the SFTP library.
        @rtype: C{str}
        """
        # Drop one trailing slash from the base path so joining with the
        # (slash-prefixed) identifier does not produce a double slash.
        base = self._basePath
        if base.endswith("/"):
            base = base[:-1]
        return (base + identifier).encode(
            constants.FILE_NAME_ENCODING, "replace")

    @staticmethod
    def determineParentId(identifier):
        """ Determines the logical ID of the parent item.

        @param identifier: The logical ID of an item.
        @type identifier: C{unicode}

        @return: The logical ID of the parent item.
        @rtype: C{unicode}
        """
        normalized = identifier[:-1] if identifier.endswith("/") else identifier
        parent = "/".join(normalized.rsplit("/")[:-1])
        # An absolute single-level ID (e.g. "/a") has the root as parent.
        if parent == "" and normalized.startswith("/") and normalized != "/":
            parent = "/"
        return parent

    @staticmethod
    def determineChildId(identifier, name):
        """
        Creates the child ID for the given identifier and the child name.

        @note: Both parameters can be C{str} or C{unicode}.
        However, make sure that both have the same type/
        are encoded the same way.
        """
        if not identifier and not name:
            return ""
        separator = "" if identifier.endswith("/") else "/"
        return identifier + separator + name

    @staticmethod
    def determinePersistenceChildId(persistenceIdentifier, name):
        """
        Creates the child ID for the given persistence identifier and the child name.

        @note: It is just an alias definition for persistence IDs which implies that you
        use already encoded string.
        """
        return ItemIdentifierMapper.determineChildId(persistenceIdentifier, name)
| {
"content_hash": "15181f322eeba8cd891dcbd9fa22758d",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 89,
"avg_line_length": 32.34090909090909,
"alnum_prop": 0.5685172171468728,
"repo_name": "DLR-SC/DataFinder",
"id": "c91235c377d0d835c912402299c1f35963844e65",
"size": "4543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/datafinder/persistence/adapters/sftp/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
} |
import sys
import logging
import gensim
from optparse import OptionParser
from docs import config
from lib.models import load_model
# Configure root logging once at import time (timestamped INFO output).
logging.basicConfig(format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO)
# Build this program's option parser
def build_opt_parser():
    """Create and return the OptionParser used by this script."""
    parser = OptionParser(usage="usage: %prog [options] <model>")
    parser.add_option(
        "-t",
        "--num-terms",
        dest="num_terms",
        type="int",
        default=config.default_display_depth,
        help="The number of terms to be displayed per topic in the model",
    )
    return parser
# Parse commandline arguments using OptionParser given
def parse_arguments(parser):
    """Parse command line arguments with the given parser.

    Requires at least one positional argument (the model file); otherwise the
    usage message is printed and the program exits with a non-zero status.

    :param parser: a configured ``optparse.OptionParser``
    :return: the ``(options, args)`` pair from ``parser.parse_args()``
    """
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        # Use sys.exit instead of the site-provided exit() builtin (only
        # guaranteed in interactive sessions), and report the usage error
        # with a non-zero exit status.
        sys.exit(1)
    return options, args
def view_model(fname, num_terms):
    """Load the topic model stored at ``fname`` and log its topics."""
    topic_model = load_model(fname)
    logging.info("{} Topics".format(topic_model.num_topics))
    topic_model.print_topics(topic_model.num_topics, num_words=num_terms)
# Main function. Entry point for program
def main():
    """Parse the command line and display the requested model."""
    options, args = parse_arguments(build_opt_parser())
    view_model(args[0], options.num_terms)


if __name__ == "__main__":
    main()
| {
"content_hash": "42217613c3db9862a64e4f473ec3c238",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 91,
"avg_line_length": 26.847826086956523,
"alnum_prop": 0.6623481781376518,
"repo_name": "munnellg/elltm",
"id": "d87a720eee822a824ea866b39fcc8d16e3f7a5b2",
"size": "1235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "view_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25922"
}
],
"symlink_target": ""
} |
"""
Created on Thu Mar 24 08:18:04 2016
@author: npop
class DataWriterAscii
Inherits from DataWriter
This is simply head files and ascii data files
However, the header file saved is relevant only to this software
The header file means less processing to read the header information
"""
import os
import glob
import struct
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
import numpy as np
# parents
from dataWriter import DataWriter
# utils
from utilsIO import *
class DataWriterAscii(DataWriter):
    """Data writer that saves each channel as a plain ASCII file.

    One file per channel is written ("chan_00.ascii", "chan_01.ascii", ...)
    into the writer's output path, with one formatted sample per line.
    """

    def writeDataFiles(self, chans, chanData):
        """Write one ASCII data file per channel.

        @param chans: channel identifiers (keys into chanData)
        @param chanData: mapping from channel identifier to the channel's
            samples (presumably a numpy array, given ``.size`` was used
            originally - TODO confirm)
        """
        self.extension = ".ascii"
        for idx, c in enumerate(chans):
            writePath = os.path.join(
                self.getOutPath(), "chan_{:02d}{}".format(idx, self.extension))
            # A context manager guarantees the handle is closed even if a
            # write raises (the original leaked the handle on error).
            with open(writePath, "w") as dataF:
                # Iterate the samples directly instead of indexing with the
                # Python-2-only xrange: identical output, portable code.
                for sample in chanData[c]:
                    dataF.write("{:9f}\n".format(sample))

    ###################
    ### DEBUG
    ###################

    def printInfoBegin(self):
        # Banner printed before the writer's information output.
        self.printText("####################")
        self.printText("DATA WRITER ASCII INFO BEGIN")
        self.printText("####################")

    def printInfoEnd(self):
        # Banner printed after the writer's information output.
        self.printText("####################")
        self.printText("DATA WRITER ASCII INFO END")
        self.printText("####################")

    def printText(self, infoStr):
        # generalPrint is provided by the module-level `from utilsIO import *`.
        generalPrint("Data Writer ASCII Info", infoStr)

    def printWarning(self, warnStr):
        warningPrint("Data Writer ASCII Warning", warnStr)
"content_hash": "967124af71d912689fafb6346873e952",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 91,
"avg_line_length": 28.20754716981132,
"alnum_prop": 0.6682274247491639,
"repo_name": "nss350/magPy",
"id": "21f051c3c1219197b8229a16b5c0353b55cd659f",
"size": "1519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/dataWriterAscii.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "496897"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.db import models
from midnight_main.models import BaseTree, Base, BreadCrumbsMixin, BaseComment
from ckeditor.fields import RichTextField
from django.utils.translation import ugettext_lazy as _
from sorl.thumbnail import ImageField
from mptt.fields import TreeManyToManyField
class Section(BreadCrumbsMixin, BaseTree):
    """
    News category model (MPTT tree node).
    """
    title = models.CharField(max_length=255, verbose_name=_('Title'))
    slug = models.SlugField(max_length=255, unique=True, verbose_name=_('Slug'))
    # Lower values sort first (see MPTTMeta.order_insertion_by below).
    sort = models.IntegerField(default=500, verbose_name=_('Sort'))
    # SEO metadata for the category page.
    metatitle = models.CharField(max_length=2000, blank=True, verbose_name=_('Title'))
    keywords = models.CharField(max_length=2000, blank=True, verbose_name=_('Keywords'))
    description = models.CharField(max_length=2000, blank=True, verbose_name=_('Description'))

    def get_absolute_url(self):
        # Canonical URL: the news list filtered by this section's slug.
        return reverse('midnight_news:news_list', kwargs={'slug': self.slug})

    def __str__(self):
        return self.title

    class MPTTMeta:
        order_insertion_by = ['sort']

    class Meta:
        verbose_name = _('NewsSection')
        verbose_name_plural = _('NewsSections')
class News(Base):
    """
    News item model.
    """
    title = models.CharField(max_length=255, verbose_name=_('Title'))
    slug = models.SlugField(max_length=255, unique=True, verbose_name=_('Slug'))
    date = models.DateField(verbose_name=_('Date'), blank=False)
    # An item may appear in several sections of the category tree.
    sections = TreeManyToManyField(Section, verbose_name=_('Sections'))
    image = ImageField(upload_to='news', verbose_name=_('Image'), blank=True)
    annotation = models.TextField(blank=True, verbose_name=_('Annotation'))
    text = RichTextField(blank=True, verbose_name=_('Text'))
    # Whether commenting is enabled for this item.
    comments = models.BooleanField(default=False, verbose_name=_('Comments'))
    # SEO metadata for the detail page.
    metatitle = models.CharField(max_length=2000, blank=True, verbose_name=_('Title'))
    keywords = models.CharField(max_length=2000, blank=True, verbose_name=_('Keywords'))
    description = models.CharField(max_length=2000, blank=True, verbose_name=_('Description'))

    def get_absolute_url(self):
        # NOTE(review): builds the URL from the first related section;
        # raises IndexError when the item has no sections - TODO confirm
        # callers guarantee at least one section is always assigned.
        return reverse('midnight_news:news_detail', kwargs={'section_slug': self.sections.all()[0].slug, 'slug': self.slug})

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = _('NewsItem')
        verbose_name_plural = _('News')
class NewsComment(BaseComment):
    """
    Comment attached to a news item.
    """
    # The news item this comment belongs to.
    obj = models.ForeignKey(News)

    class Meta:
        verbose_name = _('NewsComment')
        verbose_name_plural = _('NewsComments')
| {
"content_hash": "cbdffb87b9c210c3afe7d654b39294a6",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 124,
"avg_line_length": 28.083333333333332,
"alnum_prop": 0.6761869436201781,
"repo_name": "webadmin87/midnight",
"id": "b67b35eb818dc4e5baf99aad96bee0144538fed9",
"size": "2757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "midnight_news/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1201"
},
{
"name": "HTML",
"bytes": "54017"
},
{
"name": "JavaScript",
"bytes": "4595"
},
{
"name": "Python",
"bytes": "119258"
}
],
"symlink_target": ""
} |
"""
Blueprint providing features regarding the news entries.
"""
from operator import attrgetter
from flask import Blueprint, abort, current_app, render_template, request
# Blueprint collecting all news views, mounted under /news.
bp_news = Blueprint('news', __name__, url_prefix='/news')
@bp_news.route("/")
def show():
"""Get all markdown files from 'content/news/', parse them and put
them in a list for the template.
The formatting of these files is described in the readme.
"""
start = request.args.get('start', None, int)
end = request.args.get('end', None, int)
cf_pages = current_app.cf_pages
cf_pages.reload()
news = sorted(
(article for article in cf_pages.get_articles_of_category('news')
if hasattr(article, 'date')),
key=attrgetter('date'),
reverse=True,
)
if len(news) == 0:
return render_template("index.html", articles=None,
previous_range=0, next_range=0)
default_step = 10
# calculating mod len() allows things like `end=-1` for the last
# article(s). this may lead to confusing behaviour because this
# allows values out of the range (|val|≥len(latest)), but this
# will only result in displaying articles instead of throwing an
# error. Apart from that, such values would just appear if edited
# manually.
if start is None:
if end is None:
start, end = 0, default_step
else:
end %= len(news)
start = max(end - default_step + 1, 0)
else:
start %= len(news)
if end is None:
end = min(start + default_step - 1, len(news) - 1)
else:
end %= len(news)
delta = end - start + 1
prev_range, next_range = None, None
if start > 0:
prev_range = {'start': max(start - delta, 0), 'end': start - 1}
if end < len(news) - 1:
next_range = {'start': end + 1, 'end': min(end + delta, len(news) - 1)}
return render_template("index.html", articles=news[start:end+1],
previous_range=prev_range, next_range=next_range)
@bp_news.route("/<filename>")
def show_news(filename):
news = current_app.cf_pages.get_articles_of_category('news')
for article in news:
if article.file_basename == filename:
return render_template("template.html", article=article)
abort(404)
| {
"content_hash": "1dc87ba2ebdc0081ae8911580fb61918",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 33.225352112676056,
"alnum_prop": 0.602797795676134,
"repo_name": "agdsn/sipa",
"id": "8cb54a8a5a43143047e81dbb3c432b41debbff10",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "sipa/blueprints/news.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17185"
},
{
"name": "Dockerfile",
"bytes": "840"
},
{
"name": "HTML",
"bytes": "57259"
},
{
"name": "JavaScript",
"bytes": "252921"
},
{
"name": "Makefile",
"bytes": "947"
},
{
"name": "Python",
"bytes": "300232"
},
{
"name": "Shell",
"bytes": "4193"
}
],
"symlink_target": ""
} |
import copy
import multiprocessing
from multiprocessing import Pool
import numpy as np
from tick.base.simulation import Simu
def simulate_single(simulation):
    """Advance one Hawkes simulation to completion and hand it back.

    Module-level so it is picklable as the worker function for
    ``multiprocessing.Pool.map``.
    """
    simulation.simulate()
    return simulation
class SimuHawkesMulti(Simu):
    """Parallel simulations of a single Hawkes simulation

    The incoming Hawkes simulation is replicated by the number n_simulations.
    At simulation time, the replicated Hawkes processes are run in parallel
    by a pool of worker processes whose size is given by n_threads.

    Attributes
    ----------
    hawkes_simu : 'SimuHawkes'
        The Hawkes simulation that is replicated and simulated in parallel

    n_simulations : `int`
        The number of times the Hawkes simulation is performed

    n_threads : `int`, default=1
        The number of workers used to run the Hawkes simulations. If this
        number is negative or zero, the number of workers is set to the
        number of system available CPU cores.

    n_total_jumps : `list` of `int`
        List of the total number of jumps simulated for each process

    timestamps : `list` of `list` of `np.ndarray`, size=n_simulations, n_nodes
        A list containing n_simulations lists of timestamps arrays, one for
        each process that is being simulated by this object.

    end_time : `list` of `float`
        List of the end time for each Hawkes process

    max_jumps : `list` of `int`
        List of the maximum number of jumps for each process

    simulation_time : `list` of `float`
        List of times each process has been simulated

    n_nodes : `list` of `int`
        List of the number of nodes of the Hawkes processes

    spectral_radius : `list` of `float`
        List of the spectral radii of the Hawkes processes

    mean_intensity : `list` of `float`
        List of the mean intensities of the Hawkes processes
    """

    # Attribute metadata consumed by tick's Base machinery:
    # hawkes_simu and n_simulations become read-only after __init__.
    _attrinfos = {
        "_simulations": {},
        "hawkes_simu": {
            "writable": False
        },
        "n_simulations": {
            "writable": False
        },
    }

    def __init__(self, hawkes_simu, n_simulations, n_threads=1):
        self.hawkes_simu = hawkes_simu
        self.n_simulations = n_simulations
        # Non-positive thread count means "use all available cores".
        if n_threads <= 0:
            n_threads = multiprocessing.cpu_count()
        self.n_threads = n_threads
        if n_simulations <= 0:
            raise ValueError("n_simulations must be greater or equal to 1")
        # Each replica is an independent deep copy of the template simulation.
        self._simulations = [
            copy.deepcopy(hawkes_simu) for _ in range(n_simulations)
        ]
        Simu.__init__(self, seed=self.seed, verbose=hawkes_simu.verbose)
        # A non-negative seed means deterministic runs: derive one distinct
        # seed per replica from it.
        if self.seed is not None and self.seed >= 0:
            self.reseed_simulations(self.seed)

    @property
    def seed(self):
        # The seed is delegated to the template simulation.
        return self.hawkes_simu.seed

    @seed.setter
    def seed(self, val):
        # Setting the seed re-derives every replica's seed.
        self.reseed_simulations(val)

    def reseed_simulations(self, seed):
        """Reseeds all simulations such that each simulation is started with a
        unique seed. The random selection of new seeds is seeded with the value
        given in 'seed'.

        Parameters
        ----------
        seed :
            Seed used to randomly select new seeds
        """
        # this updates self.seed
        self.hawkes_simu._pp.reseed_random_generator(seed)
        if seed >= 0:
            np.random.seed(self.seed)
            # One independent 32-bit seed per replica.
            new_seeds = np.random.randint(0, 2 ** 31 - 1, self.n_simulations)
            new_seeds = new_seeds.astype('int32')
        else:
            # Negative seed: propagate it unchanged to every replica.
            new_seeds = np.ones(self.n_simulations, dtype='int32') * seed
        for simu, seed in zip(self._simulations, new_seeds):
            simu.seed = seed.item()

    @property
    def n_total_jumps(self):
        return [simu.n_total_jumps for simu in self._simulations]

    @property
    def timestamps(self):
        return [simu.timestamps for simu in self._simulations]

    @property
    def end_time(self):
        return [simu.end_time for simu in self._simulations]

    @end_time.setter
    def end_time(self, end_times):
        # Requires exactly one end time per replica.
        if len(end_times) != self.n_simulations:
            raise ValueError('end_time must have length {}'.format(
                self.n_simulations))
        for i, simu in enumerate(self._simulations):
            simu.end_time = end_times[i]

    @property
    def max_jumps(self):
        return [simu.max_jumps for simu in self._simulations]

    @property
    def simulation_time(self):
        return [simu.simulation_time for simu in self._simulations]

    @property
    def n_nodes(self):
        return [simu.n_nodes for simu in self._simulations]

    @property
    def spectral_radius(self):
        return [simu.spectral_radius() for simu in self._simulations]

    @property
    def mean_intensity(self):
        return [simu.mean_intensity() for simu in self._simulations]

    def get_single_simulation(self, i):
        # Access the i-th replica directly.
        return self._simulations[i]

    def _simulate(self):
        """ Launches a series of n_simulations Hawkes simulations in a
        process pool (multiprocessing.Pool) of size n_threads.
        """
        with Pool(self.n_threads) as p:
            self._simulations = p.map(simulate_single, self._simulations)
| {
"content_hash": "7cdcc7cad08d07b6c67c755710a0c5ad",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 80,
"avg_line_length": 29.751445086705203,
"alnum_prop": 0.6259957256654362,
"repo_name": "Dekken/tick",
"id": "70137ecb723daebea2bdcd3c3d49905efd28967e",
"size": "5172",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tick/hawkes/simulation/simu_hawkes_multi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6660"
},
{
"name": "C++",
"bytes": "1181742"
},
{
"name": "CMake",
"bytes": "22073"
},
{
"name": "Dockerfile",
"bytes": "2017"
},
{
"name": "Python",
"bytes": "1450866"
},
{
"name": "Shell",
"bytes": "33446"
}
],
"symlink_target": ""
} |
"""Tests for the TypeSpec base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
class TwoTensors(object):
  """Simple two-tensor value type used to exercise TypeSpec.

  Holds two tensors (x, y) plus a string field (color) that stands in for
  any extra type metadata a real value type might need to store.
  """

  def __init__(self, x, y, color="red"):
    assert isinstance(color, str)
    self.color = color
    self.x = ops.convert_to_tensor(x)
    self.y = ops.convert_to_tensor(y)
@type_spec.register("tf.TwoTensorsSpec")
class TwoTensorsSpec(type_spec.TypeSpec):
"""A TypeSpec for the TwoTensors value type."""
def __init__(self, x_shape, x_dtype, y_shape, y_dtype, color="red"):
self.x_shape = tensor_shape.as_shape(x_shape)
self.x_dtype = dtypes.as_dtype(x_dtype)
self.y_shape = tensor_shape.as_shape(y_shape)
self.y_dtype = dtypes.as_dtype(y_dtype)
self.color = color
value_type = property(lambda self: TwoTensors)
@property
def _component_specs(self):
return (tensor_spec.TensorSpec(self.x_shape, self.x_dtype),
tensor_spec.TensorSpec(self.y_shape, self.y_dtype))
def _to_components(self, value):
return (value.x, value.y)
def _from_components(self, components):
x, y = components
return TwoTensors(x, y, self.color)
def _serialize(self):
return (self.x_shape, self.x_dtype, self.y_shape, self.y_dtype, self.color)
@classmethod
def from_value(cls, value):
return cls(value.x.shape, value.x.dtype, value.y.shape, value.y.dtype,
value.color)
# Let type_spec_from_value() build a TwoTensorsSpec from a TwoTensors value.
type_spec.register_type_spec_from_value_converter(
    TwoTensors, TwoTensorsSpec.from_value)
class TwoComposites(object):
  """Simple value type holding two composite tensors, for TypeSpec tests.

  Contains two composite tensors (x, y) and a string (color).
  """

  def __init__(self, x, y, color="red"):
    assert isinstance(color, str)
    self.color = color
    self.x = ops.convert_to_tensor_or_composite(x)
    self.y = ops.convert_to_tensor_or_composite(y)
@type_spec.register("tf.TwoCompositesSpec")
class TwoCompositesSpec(type_spec.TypeSpec):
"""A TypeSpec for the TwoTensors value type."""
def __init__(self, x_spec, y_spec, color="red"):
self.x_spec = x_spec
self.y_spec = y_spec
self.color = color
value_type = property(lambda self: TwoComposites)
@property
def _component_specs(self):
return (self.x_spec, self.y_spec)
def _to_components(self, value):
return (value.x, value.y)
def _from_components(self, components):
x, y = components
return TwoTensors(x, y, self.color)
def _serialize(self):
return (self.x_spec, self.y_spec, self.color)
@classmethod
def from_value(cls, value):
return cls(type_spec.type_spec_from_value(value.x),
type_spec.type_spec_from_value(value.y),
value.color)
# Let type_spec_from_value() build a TwoCompositesSpec from a TwoComposites.
type_spec.register_type_spec_from_value_converter(
    TwoComposites, TwoCompositesSpec.from_value)
class NestOfTensors(object):
  """CompositeTensor containing a nest of tensors."""

  def __init__(self, x):
    # x may be any nest structure (tuple/list/dict/namedtuple) of tensors.
    self.nest = x
@type_spec.register("tf.NestOfTensorsSpec")
class NestOfTensorsSpec(type_spec.TypeSpec):
"""A TypeSpec for the NestOfTensors value type."""
def __init__(self, spec):
self.spec = spec
value_type = property(lambda self: NestOfTensors)
_component_specs = property(lambda self: self.spec)
def _to_components(self, value):
return nest.flatten(value)
def _from_components(self, components):
return nest.pack_sequence_as(self.spec, components)
def _serialize(self):
return self.spec
def __repr__(self):
if hasattr(self.spec, "_fields") and isinstance(
self.spec._fields, collections_abc.Sequence) and all(
isinstance(f, six.string_types) for f in self.spec._fields):
return "%s(%r)" % (type(self).__name__, self._serialize())
return super(type_spec.TypeSpec, self).__repr__()
@classmethod
def from_value(cls, value):
return cls(nest.map_structure(type_spec.type_spec_from_value, value.nest))
@classmethod
def _deserialize(cls, spec):
return cls(spec)
# Let type_spec_from_value() build a NestOfTensorsSpec from a NestOfTensors.
type_spec.register_type_spec_from_value_converter(
    NestOfTensors, NestOfTensorsSpec.from_value)
# Named-tuple helpers used by the (in)compatibility tests below.
_TestNamedTuple = collections.namedtuple("NamedTuple", ["a", "b"])
_TestNamedTupleSingleField = collections.namedtuple("SingleField", ["a"])
_TestNamedTupleDifferentField = collections.namedtuple("DifferentField",
                                                       ["a", "c"])
class TypeSpecTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(
("FullySpecified",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool)),
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool)),
("Metadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("NumpyMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
(np.int32(1), np.float32(1.),
np.array([[1, 2], [3, 4]]))),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
(np.int32(1), np.float32(1.),
np.array([[1, 2], [3, 4]])))),
)
def testEquality(self, v1, v2):
# pylint: disable=g-generic-assert
self.assertEqual(v1, v2)
self.assertEqual(v2, v1)
self.assertFalse(v1 != v2)
self.assertFalse(v2 != v1)
self.assertEqual(hash(v1), hash(v2))
@parameterized.named_parameters(
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("IncompatibleDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("IncompatibleRank",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool)),
("IncompatibleDimSize",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool)),
("IncompatibleMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("SwappedValues",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([None], dtypes.bool, [5, 3], dtypes.int32)),
("DiffMetadataNumpy",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
np.array([[1, 2], [3, 4]])),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
np.array([[1, 2], [3, 8]]))),
("DiffMetadataTensorSpecName",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="a")),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="b"))),
("Non-TypeSpec",
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool), 5),
)
def testInequality(self, v1, v2):
# pylint: disable=g-generic-assert
self.assertNotEqual(v1, v2)
self.assertNotEqual(v2, v1)
self.assertFalse(v1 == v2)
self.assertFalse(v2 == v1)
@parameterized.named_parameters(
("SameValue",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool)),
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),
)
def testIsCompatibleWith(self, v1, v2):
self.assertTrue(v1.is_compatible_with(v2))
self.assertTrue(v2.is_compatible_with(v1))
@parameterized.named_parameters(
("IncompatibleDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("IncompatibleRank",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool)),
("IncompatibleDimSize",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool)),
("IncompatibleMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, "blue")),
("SwappedValues",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([None], dtypes.bool, [5, 3], dtypes.int32)),
)
def testIsNotCompatibleWith(self, v1, v2):
self.assertFalse(v1.is_compatible_with(v2))
self.assertFalse(v2.is_compatible_with(v1))
@parameterized.named_parameters(
("EqualTypes",
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)),
("UnknownDim",
TwoTensorsSpec([5, None], dtypes.int32, [8], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),
("UnknownRank",
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool),
TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool)),
("DiffRank",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)),
("DiffDimSize",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),
("DiffMetadataTensorSpecName",
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="a")),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name="b")),
TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,
tensor_spec.TensorSpec([4], name=None))),
("NamedTuple",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32)))),
)
def testMostSpecificCompatibleType(self, v1, v2, expected):
self.assertEqual(v1.most_specific_compatible_type(v2), expected)
self.assertEqual(v2.most_specific_compatible_type(v1), expected)
@parameterized.named_parameters(
("IncompatibleDtype",
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),
TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),
("IncompatibleMetadata",
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool, "red"),
TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool, "blue")),
)
def testMostSpecificCompatibleTypeException(self, v1, v2):
with self.assertRaises(ValueError):
v1.most_specific_compatible_type(v2)
with self.assertRaises(ValueError):
v2.most_specific_compatible_type(v1)
def testMostSpecificCompatibleTypeNamedTupleIsNotTuple(self):
named_tuple_spec_a = NestOfTensorsSpec.from_value(NestOfTensors(
_TestNamedTuple(a=1, b="aaa")))
named_tuple_spec_b = NestOfTensorsSpec.from_value(NestOfTensors(
_TestNamedTuple(a=2, b="bbb")))
named_tuple_spec_c = NestOfTensorsSpec.from_value(NestOfTensors(
_TestNamedTuple(a=3, b="ccc")))
normal_tuple_spec = NestOfTensorsSpec.from_value(NestOfTensors((2, "bbb")))
result_a_b = named_tuple_spec_a.most_specific_compatible_type(
named_tuple_spec_b)
result_b_a = named_tuple_spec_b.most_specific_compatible_type(
named_tuple_spec_a)
self.assertEqual(repr(result_a_b), repr(named_tuple_spec_c))
self.assertEqual(repr(result_b_a), repr(named_tuple_spec_c))
# Test that spec of named tuple is not equal to spec of normal tuple.
self.assertNotEqual(repr(result_a_b), repr(normal_tuple_spec))
@parameterized.named_parameters(
("IncompatibleDtype",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.bool))),
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32)))),
("DifferentTupleSize",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.bool))),
NestOfTensorsSpec(_TestNamedTupleSingleField(
a=tensor_spec.TensorSpec((), dtypes.int32)))),
("DifferentFieldName",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec(_TestNamedTupleDifferentField(
a=tensor_spec.TensorSpec((), dtypes.int32),
c=tensor_spec.TensorSpec((), dtypes.int32)))),
("NamedTupleAndTuple",
NestOfTensorsSpec(_TestNamedTuple(
a=tensor_spec.TensorSpec((), dtypes.int32),
b=tensor_spec.TensorSpec((), dtypes.int32))),
NestOfTensorsSpec((
tensor_spec.TensorSpec((), dtypes.int32),
tensor_spec.TensorSpec((), dtypes.int32)))),
)
def testMostSpecificCompatibleTypeForNamedTuplesException(self, v1, v2):
with self.assertRaises(ValueError):
v1.most_specific_compatible_type(v2)
with self.assertRaises(ValueError):
v2.most_specific_compatible_type(v1)
def toTensorList(self):
value = TwoTensors([1, 2, 3], [1.0, 2.0], "red")
spec = TwoTensorsSpec.from_value(value)
tensor_list = spec._to_tensor_list(value)
self.assertLen(tensor_list, 2)
self.assertIs(tensor_list[0], value.x)
self.assertIs(tensor_list[1], value.y)
def fromTensorList(self):
x = ops.convert_to_tensor([1, 2, 3])
y = ops.convert_to_tensor([1.0, 2.0])
color = "green"
spec = TwoTensorsSpec(x.shape, x.dtype, y.shape, y.dtype, color)
value = spec._from_tensor_list([x, y])
self.assertIs(value.x, x)
self.assertIs(value.y, y)
self.assertEqual(value.color, color)
def fromIncompatibleTensorList(self):
x = ops.convert_to_tensor([1, 2, 3])
y = ops.convert_to_tensor([1.0, 2.0])
spec1 = TwoTensorsSpec([100], x.dtype, y.shape, y.dtype, "green")
spec2 = TwoTensorsSpec(x.shape, x.dtype, y.shape, dtypes.bool, "green")
with self.assertRaises(ValueError):
spec1._from_tensor_list([x, y]) # shape mismatch
with self.assertRaises(ValueError):
spec2._from_tensor_list([x, y]) # dtype mismatch
def testFlatTensorSpecs(self):
spec = TwoTensorsSpec([5], dtypes.int32, [5, 8], dtypes.float32, "red")
self.assertEqual(spec._flat_tensor_specs,
[tensor_spec.TensorSpec([5], dtypes.int32),
tensor_spec.TensorSpec([5, 8], dtypes.float32)])
def testRepr(self):
spec = TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)
self.assertEqual(
repr(spec),
"TwoTensorsSpec(%r, %r, %r, %r, %r)" %
(tensor_shape.TensorShape([5, 3]), dtypes.int32,
tensor_shape.TensorShape(None), dtypes.bool, "red"))
def testFromValue(self):
value = TwoTensors([1, 2, 3], [1.0, 2.0], "red")
spec = type_spec.type_spec_from_value(value)
self.assertEqual(spec, TwoTensorsSpec.from_value(value))
def testNestedRagged(self):
# Check that TwoCompositeSpecs are compatible if one has a nested
# RaggedTensorSpec w/ ragged_rank=0 and the other has a corresponding
# nested TensorSpec.
spec1 = TwoCompositesSpec(
ragged_tensor.RaggedTensorSpec([10], dtypes.int32, ragged_rank=0),
tensor_spec.TensorSpec(None, dtypes.int32))
spec2 = TwoCompositesSpec(
tensor_spec.TensorSpec([10], dtypes.int32),
tensor_spec.TensorSpec(None, dtypes.int32))
spec3 = TwoCompositesSpec(
tensor_spec.TensorSpec([12], dtypes.int32),
tensor_spec.TensorSpec(None, dtypes.int32))
self.assertTrue(spec1.is_compatible_with(spec2))
self.assertFalse(spec1.is_compatible_with(spec3))
def testRegistry(self):
self.assertEqual("tf.TwoCompositesSpec",
type_spec.get_name(TwoCompositesSpec))
self.assertEqual("tf.TwoTensorsSpec", type_spec.get_name(TwoTensorsSpec))
self.assertEqual(TwoCompositesSpec,
type_spec.lookup("tf.TwoCompositesSpec"))
self.assertEqual(TwoTensorsSpec, type_spec.lookup("tf.TwoTensorsSpec"))
def testRegistryTypeErrors(self):
with self.assertRaisesRegex(TypeError, "Expected `name` to be a string"):
type_spec.register(None)
with self.assertRaisesRegex(TypeError, "Expected `name` to be a string"):
type_spec.register(TwoTensorsSpec)
with self.assertRaisesRegex(TypeError, "Expected `cls` to be a TypeSpec"):
type_spec.register("tf.foo")(None)
with self.assertRaisesRegex(TypeError, "Expected `cls` to be a TypeSpec"):
type_spec.register("tf.foo")(ragged_tensor.RaggedTensor)
  def testRegistryDuplicateErrors(self):
    """Re-registering a name or a class raises a descriptive ValueError."""
    # Registering a *new* class under an already-taken name fails.
    with self.assertRaisesRegex(
        ValueError, "Name tf.TwoCompositesSpec has already been registered "
        "for class __main__.TwoCompositesSpec."):

      @type_spec.register("tf.TwoCompositesSpec")  # pylint: disable=unused-variable
      class NewTypeSpec(TwoCompositesSpec):
        pass

    # Registering an already-registered class under a *new* name also fails.
    with self.assertRaisesRegex(
        ValueError, "Class __main__.TwoCompositesSpec has already been "
        "registered with name tf.TwoCompositesSpec"):
      type_spec.register("tf.NewName")(TwoCompositesSpec)
def testRegistryNameErrors(self):
for bad_name in ["foo", "", "hello world"]:
with self.assertRaises(ValueError):
type_spec.register(bad_name)
def testRegistryLookupErrors(self):
with self.assertRaises(TypeError):
type_spec.lookup(None)
with self.assertRaisesRegex(
ValueError, "No TypeSpec has been registered with name 'foo.bar'"):
type_spec.lookup("foo.bar")
  def testRegistryGetNameErrors(self):
    """get_name() rejects non-TypeSpec input and unregistered TypeSpecs."""
    with self.assertRaises(TypeError):
      type_spec.get_name(None)

    # A subclass does not inherit its parent class's registration.
    class Foo(TwoCompositesSpec):
      pass

    with self.assertRaisesRegex(
        ValueError, "TypeSpec __main__.Foo has not been registered."):
      type_spec.get_name(Foo)
if __name__ == "__main__":
googletest.main()
| {
"content_hash": "d8d8794f32f1c6a758508159350da991",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 84,
"avg_line_length": 39.14258188824663,
"alnum_prop": 0.652030519320699,
"repo_name": "petewarden/tensorflow",
"id": "d55a6c4c3794b8363379b7a495fc7f53984babaa",
"size": "21005",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/type_spec_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import json
import mimetypes
import os
import logging
import redis
from django.core.urlresolvers import NoReverseMatch, reverse
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseNotFound
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.views.decorators.csrf import csrf_view_exempt
from django.views.static import serve
from django.views.generic import TemplateView
from haystack.query import EmptySearchQuerySet
from haystack.query import SearchQuerySet
from docs.models.build import Build, Version
from docs.core.forms import FacetedSearchForm
from docs.models import Project, ImportedFile, ProjectRelationship
from docs.tasks import update_docs, remove_dir
from docs.utils import highest_version
log = logging.getLogger(__name__)
def homepage(request):
    """Render the landing page: ten newest public projects plus featured ones."""
    recently_updated = (
        Project.objects.public(request.user).order_by('-modified_date')[:10])
    featured_projects = Project.objects.filter(featured=True)
    context = {
        'project_list': recently_updated,
        'featured_list': featured_projects,
    }
    return render_to_response('homepage.html', context,
                              context_instance=RequestContext(request))
def random_page(request, project=None):
    """Redirect to a randomly chosen imported documentation page.

    When *project* is given, the choice is restricted to that project's
    files.
    """
    candidates = ImportedFile.objects.order_by('?')
    if project:
        candidates = candidates.filter(project__slug=project)
    return HttpResponseRedirect(candidates[0].get_absolute_url())
def queue_depth(request):
    """Report the number of pending jobs on the 'celery' redis list."""
    redis_client = redis.Redis(**settings.REDIS)
    return HttpResponse(redis_client.llen('celery'))
def live_builds(request):
    """Show up to five in-progress builds with a progress-bar width each."""
    building = Build.objects.filter(state='building')[:5]
    websocket_host = getattr(settings, 'WEBSOCKET_HOST', 'localhost:8088')
    count = building.count()
    # Split the bar evenly across builds (100% when there is zero or one).
    percent = 100 / count if count > 1 else 100
    context = {
        'builds': building,
        'build_percent': percent,
        'WEBSOCKET_HOST': websocket_host,
    }
    return render_to_response('all_builds.html', context,
                              context_instance=RequestContext(request))
@csrf_view_exempt
def wipe_version(request, project_slug, version_slug):
    """Delete a version's VCS checkout directory (project owners only).

    Queues an asynchronous removal of the checkout path and renders a
    confirmation page.  Raises Http404 when the requesting user is not
    one of the project's owners.
    """
    version = get_object_or_404(Version, project__slug=project_slug,
                                slug=version_slug)
    if request.user not in version.project.users.all():
        raise Http404("You must own this project to wipe it.")
    del_dir = version.project.checkout_path(version.slug)
    if del_dir:
        # Deletion happens out-of-band in a worker task.
        remove_dir.delay(del_dir)
        return render_to_response('wipe_version.html',
                                  {'del_dir': del_dir,
                                   'deleted': True},
                                  context_instance=RequestContext(request))
    return render_to_response('wipe_version.html',
                              {'del_dir': del_dir},
                              context_instance=RequestContext(request))
@csrf_view_exempt
def github_build(request):
    """Post-commit hook endpoint for GitHub.

    Parses the webhook payload, matches the pushed repository/branch
    against known projects and queues documentation builds.  When no
    project matches the repository URL, a new project is created for the
    payload owner's user account.

    Fixes over the previous revision:
    - Non-POST requests crashed with a NameError (``project`` was never
      bound in the else branch); they now get an explicit 404.
    - ``except Exception, e`` rewritten as ``except Exception as e``
      (the old form is Python-2-only syntax; ``as`` works on 2.6+ and 3).
    """
    if request.method != 'POST':
        # GET/HEAD probes: nothing to build.
        return HttpResponseNotFound('Build endpoint only accepts POST')
    obj = json.loads(request.POST['payload'])
    name = obj['repository']['name']
    url = obj['repository']['url']
    # Scheme-less URL so it matches repos saved with either http or https.
    ghetto_url = url.replace('http://', '').replace('https://', '')
    branch = obj['ref'].replace('refs/heads/', '')
    log.info("(Github Build) %s:%s" % (ghetto_url, branch))
    version_pk = None
    version_slug = branch
    try:
        projects = Project.objects.filter(repo__contains=ghetto_url)
        for project in projects:
            version = project.version_from_branch_name(branch)
            if version:
                log.info(("(Github Build) Processing %s:%s"
                          % (project.slug, version.slug)))
                default = project.default_branch or (project.vcs_repo()
                                                     .fallback_branch)
                if branch == default:
                    # Short circuit versions that are default
                    # These will build at "latest", and thus won't be
                    # active
                    version = project.versions.get(slug='latest')
                    version_pk = version.pk
                    version_slug = version.slug
                    log.info(("(Github Build) Building %s:%s"
                              % (project.slug, version.slug)))
                elif version in project.versions.exclude(active=True):
                    log.info(("(Github Build) Not building %s"
                              % version.slug))
                    return HttpResponseNotFound(('Not Building: %s'
                                                 % branch))
                else:
                    version_pk = version.pk
                    version_slug = version.slug
                    log.info(("(Github Build) Building %s:%s"
                              % (project.slug, version.slug)))
            else:
                version_slug = 'latest'
                branch = 'latest'
                log.info(("(Github Build) Building %s:latest"
                          % project.slug))
        # version_pk being None means it will use "latest".  NOTE(review):
        # when no project matched, referencing ``project`` here raises
        # NameError, which deliberately falls through to the new-repo
        # handling below.
        update_docs.delay(pk=project.pk, version_pk=version_pk,
                          force=True)
        return HttpResponse('Build Started: %s' % version_slug)
    except Exception as e:
        log.error("(Github Build) Failed: %s:%s" % (name, e))
        # Handle pushes from repositories we have never seen before.
        project = Project.objects.filter(repo__contains=ghetto_url)
        if not len(project):
            project = Project.objects.filter(name__icontains=name)
        if len(project):
            # Bail if we think this thing exists
            return HttpResponseNotFound('Build Failed')
        # Create a brand-new project from the payload metadata.
        try:
            email = obj['repository']['owner']['email']
            desc = obj['repository']['description']
            homepage = obj['repository']['homepage']
            repo = obj['repository']['url']
            user = User.objects.get(email=email)
            proj = Project.objects.create(
                name=name,
                description=desc,
                project_url=homepage,
                repo=repo,
            )
            proj.users.add(user)
            log.error("Created new project %s" % (proj))
        except Exception as e:
            log.error("Error creating new project %s: %s" % (name, e))
            return HttpResponseNotFound('Build Failed')
        return HttpResponseNotFound('Build Failed')
@csrf_view_exempt
def bitbucket_build(request):
    """Post-commit hook endpoint for Bitbucket.

    On POST, matches the repository URL against known projects and
    queues a forced documentation build for the first match.

    Fixes over the previous revision:
    - The failure log line was mislabelled "(Github Build)"; it now says
      "(Bitbucket Build)".
    - ``except Exception, e`` rewritten as ``except Exception as e``.
    - Non-POST requests crashed with a NameError (``project`` was never
      bound in the else branch); they now get an explicit 404.
    """
    if request.method != 'POST':
        return HttpResponseNotFound('Build endpoint only accepts POST')
    obj = json.loads(request.POST['payload'])
    rep = obj['repository']
    name = rep['name']
    # Bitbucket payloads carry a path, not a full URL; prepend the host.
    url = "%s%s" % ("bitbucket.org", rep['absolute_url'].rstrip('/'))
    log.info("(Bitbucket Build) %s" % (url))
    try:
        project = Project.objects.filter(repo__contains=url)[0]
        update_docs.delay(pk=project.pk, force=True)
        return HttpResponse('Build Started')
    except Exception as e:
        log.error("(Bitbucket Build) Failed: %s:%s" % (name, e))
        return HttpResponseNotFound('Build Failed')
@csrf_view_exempt
def generic_build(request, pk):
    """Queue a build for a project, optionally for a specific version.

    Always redirects back to the project's build list afterwards.
    """
    project = Project.objects.get(pk=pk)
    context = {'built': False, 'project': project}
    if request.method == 'POST':
        context['built'] = True
        slug = request.POST.get('version_slug', None)
        if slug:
            version = project.versions.get(slug=slug)
            update_docs.delay(pk=pk, version_pk=version.pk, force=True)
        else:
            update_docs.delay(pk=pk, force=True)
    # Both the POST and non-POST paths end at the build list.
    return redirect('builds_project_list', project.slug)
def subdomain_handler(request, lang_slug=None, version_slug=None, filename=''):
    """This provides the fall-back routing for subdomain requests.

    This was made primarily to redirect old subdomains to their
    versioned brothers.
    """
    # NOTE(review): request.slug is presumably set by subdomain
    # middleware -- confirm against the middleware stack.
    project = get_object_or_404(Project, slug=request.slug)
    # Don't add index.html for htmldir.
    if not filename and project.documentation_type != 'sphinx_htmldir':
        filename = "index.html"
    if version_slug is None:
        # Handle / on subdomain.
        default_version = project.get_default_version()
        url = reverse(serve_docs, kwargs={
            'version_slug': default_version,
            'lang_slug': project.language,
            'filename': filename
        })
        return HttpResponseRedirect(url)
    if version_slug and lang_slug is None:
        # Handle /version/ on subdomain.
        aliases = project.aliases.filter(from_slug=version_slug)
        # Handle Aliases.
        if aliases.count():
            if aliases[0].largest:
                # "largest" aliases resolve to the highest matching
                # active version.
                highest_ver = highest_version(project.versions.filter(
                    slug__contains=version_slug, active=True))
                version_slug = highest_ver[0].slug
            else:
                version_slug = aliases[0].to_slug
            url = reverse(serve_docs, kwargs={
                'version_slug': version_slug,
                'lang_slug': project.language,
                'filename': filename
            })
        else:
            try:
                url = reverse(serve_docs, kwargs={
                    'version_slug': version_slug,
                    'lang_slug': project.language,
                    'filename': filename
                })
            except NoReverseMatch:
                raise Http404
        return HttpResponseRedirect(url)
    # Serve normal docs
    return serve_docs(request=request,
                      project_slug=project.slug,
                      lang_slug=lang_slug,
                      version_slug=version_slug,
                      filename=filename)
def subproject_serve_docs(request, project_slug, lang_slug=None,
                          version_slug=None, filename=''):
    """Serve docs for a subproject mounted under a parent project.

    The parent project comes from ``request.slug`` (presumably set by
    subdomain middleware -- confirm); ``project_slug`` names the child.
    Raises Http404 when no parent/child relationship exists.
    """
    parent_slug = request.slug
    proj = get_object_or_404(Project, slug=project_slug)
    subproject_qs = ProjectRelationship.objects.filter(
        parent__slug=parent_slug, child__slug=project_slug)
    if lang_slug is None or version_slug is None:
        # Handle /
        version_slug = proj.get_default_version()
        url = reverse('subproject_docs_detail', kwargs={
            'project_slug': project_slug,
            'version_slug': version_slug,
            'lang_slug': proj.language,
            'filename': filename
        })
        return HttpResponseRedirect(url)
    if subproject_qs.exists():
        return serve_docs(request, lang_slug, version_slug, filename,
                          project_slug)
    else:
        log.info('Subproject lookup failed: %s:%s' % (project_slug,
                                                      parent_slug))
        raise Http404("Subproject does not exist")
def serve_docs(request, lang_slug, version_slug, filename, project_slug=None):
    """Serve a built documentation file for a project/language/version.

    Redirects to the default version when either slug is missing, then
    checks the requesting user may see the version, and finally serves
    the file via nginx X-Accel-Redirect (production) or Django's static
    serve (DEBUG).
    """
    if not project_slug:
        # Fall back to the slug from the subdomain middleware.
        project_slug = request.slug
    proj = get_object_or_404(Project, slug=project_slug)
    # Redirects
    if not version_slug or not lang_slug:
        version_slug = proj.get_default_version()
        url = reverse(serve_docs, kwargs={
            'project_slug': project_slug,
            'version_slug': version_slug,
            'lang_slug': proj.language,
            'filename': filename
        })
        return HttpResponseRedirect(url)
    ver = get_object_or_404(Version, project__slug=project_slug,
                            slug=version_slug)
    # Auth checks
    if ver not in proj.versions.public(request.user, proj):
        res = HttpResponse("You don't have access to this version.")
        res.status_code = 401
        return res
    # Normal handling
    if not filename:
        filename = "index.html"
    # This is required because we're forming the filenames ourselves instead of
    # letting the web server do it.
    elif (proj.documentation_type == 'sphinx_htmldir'
          and "_static" not in filename
          and "_images" not in filename
          and "html" not in filename
          and not "inv" in filename):
        filename += "index.html"
    else:
        filename = filename.rstrip('/')
    # Use the old paths if we're on our old location.
    # Otherwise use the new language symlinks.
    # This can be removed once we have 'en' symlinks for every project.
    if lang_slug == proj.language:
        basepath = proj.rtd_build_path(version_slug)
    else:
        basepath = proj.translations_path(lang_slug)
        basepath = os.path.join(basepath, version_slug)
    log.info('Serving %s for %s' % (filename, proj))
    if not settings.DEBUG:
        fullpath = os.path.join(basepath, filename)
        mimetype, encoding = mimetypes.guess_type(fullpath)
        mimetype = mimetype or 'application/octet-stream'
        response = HttpResponse(mimetype=mimetype)
        if encoding:
            response["Content-Encoding"] = encoding
        try:
            # Let nginx stream the file; Django only sets headers here.
            response['X-Accel-Redirect'] = os.path.join('/user_builds',
                                                        proj.slug,
                                                        'rtd-builds',
                                                        version_slug, filename)
        except UnicodeEncodeError:
            raise Http404
        return response
    else:
        return serve(request, filename, basepath)
def server_error(request, template_name='500.html'):
    """Render the 500 page through RequestContext so media URLs resolve."""
    response = render_to_response(template_name,
                                  context_instance=RequestContext(request))
    response.status_code = 500
    return response
def server_error_404(request, template_name='404.html'):
    """Render the 404 page through RequestContext so media URLs resolve.

    (The previous docstring said "500 handler"; this view returns 404.)
    """
    r = render_to_response(template_name,
                           context_instance=RequestContext(request))
    r.status_code = 404
    return r
def divide_by_zero(request):
    """Deliberately raise ZeroDivisionError (error-reporting smoke test)."""
    numerator, denominator = 1, 0
    return numerator / denominator
def morelikethis(request, project_slug, filename):
    """Return a JSONP list of (title, url) pairs similar to *filename*.

    Uses the search backend's more-like-this support.  Fixes over the
    previous revision:
    - The not-found branch passed a raw Python dict into the JSONP
      wrapper, emitting invalid JavaScript (single-quoted dict repr);
      it is now serialized with json.dumps like the success path.
    - The local ``file`` no longer shadows the builtin.
    """
    project = get_object_or_404(Project, slug=project_slug)
    imported_file = get_object_or_404(ImportedFile, project=project,
                                      path=filename)
    sqs = SearchQuerySet().more_like_this(imported_file)[:5]
    if len(sqs):
        output = [(obj.title, obj.absolute_url) for obj in sqs]
        json_response = json.dumps(output)
    else:
        json_response = json.dumps({"message": "Not Found"})
    jsonp = "%s(%s)" % (request.GET.get('callback'), json_response)
    return HttpResponse(jsonp, mimetype='text/javascript')
class SearchView(TemplateView):
    """Faceted search over the haystack/Solr index.

    Performing a search issues multiple Solr requests: one for facet
    counts, one for the result count and one for the results themselves.

    Fix over the previous revision: the final ``return`` line of
    ``get_results`` had unrelated junk text fused onto it (a stray
    ``| {``); the plain return has been restored.
    """
    template_name = "search/base_facet.html"
    results = EmptySearchQuerySet()
    form_class = FacetedSearchForm
    form = None
    query = ''
    selected_facets = None
    selected_facets_list = None

    def get_context_data(self, request, **kwargs):
        """Assemble the template context from the search state."""
        context = super(SearchView, self).get_context_data(**kwargs)
        context['request'] = self.request
        # causes solr request #1
        context['facets'] = self.results.facet_counts()
        context['form'] = self.form
        context['query'] = self.query
        context['selected_facets'] = ('&'.join(self.selected_facets)
                                      if self.selected_facets else '')
        context['selected_facets_list'] = self.selected_facets_list
        context['results'] = self.results
        context['count'] = len(self.results)  # causes solr request #2
        return context

    def get(self, request, **kwargs):
        """
        Performing the search causes three requests to be sent to Solr.
            1. For the facets
            2. For the count (unavoidable, as pagination will cause this anyway)
            3. For the results
        """
        self.request = request
        self.form = self.build_form()
        self.selected_facets = self.get_selected_facets()
        self.selected_facets_list = self.get_selected_facets_list()
        self.query = self.get_query()
        if self.form.is_valid():
            self.results = self.get_results()
        context = self.get_context_data(request, **kwargs)
        # For returning results partials for javascript
        if request.is_ajax() or request.GET.get('ajax'):
            self.template_name = 'search/faceted_results.html'
        return self.render_to_response(context)

    def build_form(self):
        """Instantiate the search form, bound to the GET data if any."""
        data = self.request.GET if len(self.request.GET) else None
        return self.form_class(data, facets=('project',))

    def get_selected_facets_list(self):
        """Return selected facets as (name, value) tuples."""
        return [tuple(s.split(':')) for s in self.selected_facets if s]

    def get_selected_facets(self):
        """
        Return the list of 'facetname:value' strings,
        e.g. [u'project_exact:Read The Docs', u'author_exact:Eric Holscher']
        """
        return self.request.GET.getlist('selected_facets')

    def get_query(self):
        """Return the query provided by the user (None when absent)."""
        return self.request.GET.get('q')

    def get_results(self):
        """Fetch the results via the form."""
        return self.form.search()
"content_hash": "689100d6d9e51a3e0b3b39c35a54a776",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 141,
"avg_line_length": 39.108932461873636,
"alnum_prop": 0.5746197983399254,
"repo_name": "indexofire/gork",
"id": "09aa24f40d3cdb54155f7d48c8efd487f595027b",
"size": "17975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gork/application/__docs/views/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "199039"
},
{
"name": "JavaScript",
"bytes": "89817"
},
{
"name": "Python",
"bytes": "1120919"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_vswitch_info
short_description: Gathers info about an ESXi host's vswitch configurations
description:
- This module can be used to gather information about an ESXi host's vswitch configurations when ESXi hostname or Cluster name is given.
- The vSphere Client shows the value for the number of ports as elastic from vSphere 5.5 and above.
- Other tools like esxcli might show the number of ports as 1536 or 5632.
- See U(https://kb.vmware.com/s/article/2064511) for more details.
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Info about vswitch belonging to every ESXi host systems under this cluster will be returned.
- If C(esxi_hostname) is not given, this parameter is required.
type: str
esxi_hostname:
description:
- ESXi hostname to gather information from.
- If C(cluster_name) is not given, this parameter is required.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather vswitch info about all ESXi Host in given Cluster
vmware_vswitch_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: all_hosts_vswitch_info
- name: Gather firewall info about ESXi Host
vmware_vswitch_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
register: all_vswitch_info
'''
RETURN = r'''
hosts_vswitch_info:
description: metadata about host's vswitch configuration
returned: on success
type: dict
sample: {
"10.76.33.218": {
"vSwitch0": {
"mtu": 1500,
"num_ports": 128,
"pnics": [
"vmnic0"
]
},
"vSwitch_0011": {
"mtu": 1500,
"num_ports": 128,
"pnics": [
"vmnic2",
"vmnic1"
]
},
},
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class VswitchInfoManager(PyVmomi):
    """Class to gather vSwitch info for a set of ESXi hosts."""

    def __init__(self, module):
        """Resolve target host systems from cluster_name/esxi_hostname params."""
        super(VswitchInfoManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            # Fail fast: nothing to report on.
            self.module.fail_json(msg="Failed to find host system.")

    @staticmethod
    def serialize_pnics(vswitch_obj):
        """Get pnic names (e.g. 'vmnic0') from a vSwitch object."""
        pnics = []
        for pnic in vswitch_obj.pnic:
            # vSwitch contains all PNICs as string in format of 'key-vim.host.PhysicalNic-vmnic0'
            pnics.append(pnic.split("-", 3)[-1])
        return pnics

    def gather_vswitch_info(self):
        """Gather vSwitch info; returns a dict keyed by host name."""
        hosts_vswitch_info = dict()
        for host in self.hosts:
            network_manager = host.configManager.networkSystem
            if network_manager:
                temp_switch_dict = dict()
                for available_vswitch in network_manager.networkInfo.vswitch:
                    temp_switch_dict[available_vswitch.name] = dict(
                        pnics=self.serialize_pnics(available_vswitch),
                        mtu=available_vswitch.mtu,
                        # we need to use the spec to get the ports
                        # otherwise, the output might be different compared to the vswitch config module
                        # (e.g. 5632 ports instead of 128)
                        num_ports=available_vswitch.spec.numPorts
                    )
                hosts_vswitch_info[host.name] = temp_switch_dict
        return hosts_vswitch_info
def main():
    """Entry point: build the argument spec, gather info and exit."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        # At least one of cluster_name / esxi_hostname must be supplied.
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        # The module only reads configuration, so check mode is safe.
        supports_check_mode=True
    )
    vmware_vswitch_mgr = VswitchInfoManager(module)
    module.exit_json(changed=False, hosts_vswitch_info=vmware_vswitch_mgr.gather_vswitch_info())
if __name__ == "__main__":
main()
| {
"content_hash": "d4d1d5fbf773ceab3b11e68a3eda72a2",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 136,
"avg_line_length": 32.9078947368421,
"alnum_prop": 0.6089564174330268,
"repo_name": "thaim/ansible",
"id": "0732547df50ec3ed2b1d364bd57eeca250ea3e0b",
"size": "5200",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/vmware/vmware_vswitch_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
import jxmlease
project = u'jxmlease'
copyright = u'2015-2016, Juniper Networks'
author = jxmlease.__author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = jxmlease.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'Juniper',
'github_repo': 'jxmlease',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'jxmlease v1.0a1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ''
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'jxmleasedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'jxmlease.tex', u'jxmlease Documentation',
u'Juniper Networks', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'jxmlease', u'jxmlease Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'jxmlease', u'jxmlease Documentation',
author, 'jxmlease', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "14ffabdf68fdc7303964a3a441cff386",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 80,
"avg_line_length": 32.24390243902439,
"alnum_prop": 0.7008861033066782,
"repo_name": "jonlooney/jxmlease",
"id": "ffafc9b02048048de6c21f9a14d26d6361eb732e",
"size": "9675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "200165"
}
],
"symlink_target": ""
} |
import sys
import mapper
class ExifReport(mapper.Job):
    """Map/reduce job that summarises EXIF tag usage across all photos.

    map() emits each scalar EXIF (tag, value) pair at most once per photo;
    reduce() aggregates per tag into {'tag', 'count', 'values'} documents.
    """

    def map(self, photo):
        # Emit each tag only once per photo, skipping missing values and
        # list-valued entries so the reduce side only ever sees scalars.
        seen = set()
        for tag, _, value in photo['exif']:
            if tag not in seen and value is not None and type(value) != list:
                seen.add(tag)
                yield tag, value

    def reduce(self, key, values):
        yield { 'tag' : key,
                'count' : len(values),
                'values' : self.distinct_sorted_by_freq(values)
        }

    def distinct_sorted_by_freq(self, values):
        """Return [value, frequency] pairs, most frequent value first."""
        result = {}
        for v in values:
            result[v] = result.get(v, 0) + 1
        # Fix: use key= instead of the Python-2-only cmp= argument; sorting
        # by the count with reverse=True yields the same descending order.
        result = sorted(result.items(), key=lambda kv: kv[1], reverse=True)
        return [list(el) for el in result]
def main():
    """Rebuild the exif_tags report collection from every photo with EXIF data."""
    # Clear previous results so a re-run does not leave stale reports behind.
    mapper.db['exif_tags'].remove({})
    mapper.run(
        mapper.db['photos'].find({'exif':{'$ne':[]}}),
        mapper.db['exif_tags'],
        ExifReport())

if __name__ == '__main__':
    sys.exit(main())
| {
"content_hash": "25e6af39db79cb814229a9890f72ec42",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 86,
"avg_line_length": 24.595238095238095,
"alnum_prop": 0.4937076476282672,
"repo_name": "andreisavu/exif-dataminer",
"id": "88ed81793477f92e79bbf5078ba7293bdd0819be",
"size": "1057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/build_reports.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "48857"
},
{
"name": "Python",
"bytes": "14227"
}
],
"symlink_target": ""
} |
from .common import random_str, auth_check
from rancher import ApiError
import pytest
def test_dns_fields(admin_pc_client):
    """Verify the per-field permission matrix exposed by the dnsRecord schema."""
    # Each value encodes the verbs allowed on that field:
    # c = create, r = read, u = update.
    auth_check(admin_pc_client.schema, 'dnsRecord', 'crud', {
        'namespaceId': 'cr',
        'projectId': 'cr',
        'hostname': 'cru',
        'allocateLoadBalancerNodePorts': 'cru',
        'ipAddresses': 'cru',
        'ipFamilies': 'cru',
        'ipFamilyPolicy': 'cru',
        'clusterIPs': 'cru',
        'clusterIp': 'r',
        'selector': 'cru',
        'targetWorkloadIds': 'cru',
        'workloadId': 'r',
        'targetDnsRecordIds': 'cru',
        'topologyKeys': 'cru',
        'publicEndpoints': 'r',
        'ports': 'r',
    })
def test_dns_hostname(admin_pc, admin_cc_client):
    """Exercise create/update/list/get/delete of a hostname-based DNS record."""
    client = admin_pc.client
    ns = admin_cc_client.create_namespace(name=random_str(),
                                          projectId=admin_pc.project.id)
    name = random_str()

    def check(record, expected_host):
        # Invariants that must hold after both create and update.
        assert record.baseType == 'dnsRecord'
        assert record.type == 'dnsRecord'
        assert record.name == name
        assert record.hostname == expected_host
        assert "clusterIp" not in record
        assert record.namespaceId == ns.id
        assert 'namespace' not in record
        assert record.projectId == admin_pc.project.id

    dns_record = client.create_dns_record(name=name,
                                          hostname='target',
                                          namespaceId=ns.id)
    check(dns_record, 'target')

    dns_record = client.update(dns_record, hostname='target2')
    dns_record = client.reload(dns_record)
    check(dns_record, 'target2')

    # The record must be discoverable both via list and by id.
    assert any(i.id == dns_record.id for i in client.list_dns_record())

    dns_record = client.by_id_dns_record(dns_record.id)
    assert dns_record is not None

    client.delete(dns_record)
def test_dns_ips(admin_pc, admin_cc_client):
    """Exercise an ipAddresses-based DNS record, including a rejected address."""
    client = admin_pc.client
    ns = admin_cc_client.create_namespace(name=random_str(),
                                          projectId=admin_pc.project.id)
    name = random_str()

    def check(record, expected_ips):
        # Invariants that must hold after both create and update.
        assert record.baseType == 'dnsRecord'
        assert record.type == 'dnsRecord'
        assert record.name == name
        assert 'hostname' not in record
        assert record.ipAddresses == expected_ips
        assert record.clusterIp is None
        assert record.namespaceId == ns.id
        assert 'namespace' not in record
        assert record.projectId == admin_pc.project.id

    dns_record = client.create_dns_record(name=name,
                                          ipAddresses=['1.1.1.1',
                                                       '2.2.2.2'],
                                          namespaceId=ns.id)
    check(dns_record, ['1.1.1.1', '2.2.2.2'])

    dns_record = client.update(dns_record, ipAddresses=['1.1.1.2', '2.2.2.1'])
    dns_record = client.reload(dns_record)
    check(dns_record, ['1.1.1.2', '2.2.2.1'])

    # A loopback-range address must be rejected with a 422.
    with pytest.raises(ApiError) as e:
        client.create_dns_record(name=random_str(),
                                 ipAddresses=['127.0.0.2'],
                                 namespaceId='default')
    assert e.value.error.status == 422

    # The record must be discoverable both via list and by id.
    assert any(i.id == dns_record.id for i in client.list_dns_record())

    dns_record = client.by_id_dns_record(dns_record.id)
    assert dns_record is not None

    client.delete(dns_record)
| {
"content_hash": "d4eeb872daa0e571e8837fd6f82856d9",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 78,
"avg_line_length": 33.32258064516129,
"alnum_prop": 0.5866408518877058,
"repo_name": "rancherio/rancher",
"id": "20d939458f239323056c48cd74174e839dca0d87",
"size": "4132",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/integration/suite/test_dns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6795"
},
{
"name": "Shell",
"bytes": "25328"
}
],
"symlink_target": ""
} |
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import pymongo
import cPickle as pickle
import os
import math
import sys
import urllib
import matplotlib.cbook as cbook
# --- Environment setup: this script runs on two machines with different
# home directories; pick the matching paths for each.
if os.path.exists("/home/ggdhines"):
    sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
    sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
from clusterCompare import cluster_compare

if os.path.exists("/home/ggdhines"):
    base_directory = "/home/ggdhines"
else:
    base_directory = "/home/greg"

# Pickled per-user penguin cluster annotations, keyed by max user count.
penguins = pickle.load(open(base_directory+"/Databases/penguins_vote__.pickle","rb"))

#does this cluster have a corresponding cluster in the gold standard data?
#ie. does this cluster represent an actual penguin?

# #user penguins for first image - with 5 images
# print len(penguins[5][0])
# #user data
# print penguins[5][0][0]
# #gold standard data
# #print penguins[5][0][1]
#
# #users who annotated the first "penguin" in the first image
# print penguins[5][0][0][0][1]
# #and their corresponds points
# print penguins[5][0][0][0][0]

#have as a list not a tuple since we need the index
client = pymongo.MongoClient()
db = client['penguin_2014-10-22']
subject_collection = db["penguin_subjects"]

# Count, per location prefix of the image path, how many fully classified
# subjects (20 classifications) reported more than 10 animals present.
location_count = {}
for subject in subject_collection.find({"classification_count":20}):
    zooniverse_id = subject["zooniverse_id"]
    path = subject["metadata"]["path"]
    slash_index = path.find("_")
    location = path[:slash_index]

    if subject["metadata"]["counters"]["animals_present"] > 10:
        if not(location in location_count):
            location_count[location] = 1
            # First sighting of this location: dump it for inspection.
            print location
            print subject["location"]
        else:
            location_count[location] += 1

for location in sorted(location_count.keys()):
    print location + " -- " + str(location_count[location])

# NOTE(review): execution deliberately stops here; everything below this
# assert is currently dead code kept for reference.
assert False

#print gold_standard

#RESET
max_users = 20
image_index = 0
for image_index in range(len(penguins[20])):
#first - create a list of ALL users - so we can figure out who has annotated a "penguin" or hasn't
user_set = []
cluster_dict = {}
#image = penguins[max_users][image_index]
penguin_clusters = penguins[max_users][image_index][1]
zooniverse_id = penguins[max_users][image_index][0]
lowest_cluster = float("inf")
highest_cluster = -float('inf')
for penguin_index in range(len(penguin_clusters)):
users = penguin_clusters[penguin_index][1]
cluster = penguin_clusters[penguin_index][0]
center_x = np.mean(zip(*cluster)[0])
center_y = np.mean(zip(*cluster)[1])
lowest_cluster = min(lowest_cluster,center_y)
highest_cluster = max(highest_cluster,center_y)
cluster_dict[(center_x,center_y)] = users
mid_point = (lowest_cluster+highest_cluser)/2.
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
url = subject["location"]["standard"]
object_id= str(subject["_id"])
image_path = base_directory+"/Databases/penguins/images/"+object_id+".JPG"
if not(os.path.isfile(image_path)):
urllib.urlretrieve(url, image_path)
image_file = cbook.get_sample_data(base_directory + "/Databases/penguins/images/"+object_id+".JPG")
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
error_found = False
#start with the bottom half
cluster_list = cluster_dict.keys()
relations = []
for i in range(len(cluster_list)-1):
c_1 = cluster_list[i]
for j in range(i+1,len(cluster_list)):
c_2 = cluster_list[j]
users_1 = cluster_dict[c_1]
users_2 = cluster_dict[c_2]
dist = math.sqrt((c_1[0]-c_2[0])**2+(c_1[1]-c_2[1])**2)
overlap = len([u for u in users_1 if (u in users_2)])
relations.append((dist,overlap,(i,j)))
relations.sort(key = lambda x:x[0])
user_relations = zip(*relations)[1]
cluster_tuples = zip(*relations)[2]
try:
closest_single_connection = user_relations.index(1)
if closest_single_connection > 0:
print "no error"
continue
print relations[0:10]
#we have an error
for ii in range(min(len(user_relations),1)):
if user_relations[ii] == 1:
print ii
i,j = cluster_tuples[ii]
c_1 = cluster_list[i]
c_2 = cluster_list[j]
#X,Y = zip(*cluster_list)
#plt.plot(X,Y,'o')
X,Y = zip(*(c_1,c_2))
plt.plot(X,Y,'-',color="blue")
plt.show()
except ValueError:
print "**"
| {
"content_hash": "cb596396e84ff6e8910a7531bdbb563e",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 103,
"avg_line_length": 29.074074074074073,
"alnum_prop": 0.6267515923566879,
"repo_name": "camallen/aggregation",
"id": "295a5516d015f8b3c8228d38fb86aeda011fc507",
"size": "4732",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "experimental/penguins/clusterAnalysis/anomalyDetection.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "723"
},
{
"name": "Python",
"bytes": "1676640"
},
{
"name": "Scala",
"bytes": "629"
},
{
"name": "Shell",
"bytes": "95"
}
],
"symlink_target": ""
} |
import logging
from abc import abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, Generator, Generic, Iterable, List, Optional, Type, TypeVar, Union
import pypeln
from pypeln.utils import A, BaseStage, Undefined
from tqdm import tqdm
from paralleldomain import Dataset, Scene
from paralleldomain.decoding.helper import decode_dataset
from paralleldomain.model.class_mapping import ClassMap
from paralleldomain.model.sensor import (
CameraSensor,
CameraSensorFrame,
LidarSensor,
LidarSensorFrame,
Sensor,
SensorDataTypes,
SensorFrame,
)
from paralleldomain.model.unordered_scene import UnorderedScene
from paralleldomain.utilities.any_path import AnyPath
logger = logging.getLogger(__name__)
class _TqdmLoggingHandler(logging.Handler):
    """Logging handler that routes records through tqdm.write().

    tqdm.write keeps emitted log lines from corrupting an active
    progress bar's output line.
    """

    def __init__(self, level=logging.NOTSET):
        super().__init__(level)

    def emit(self, record):
        try:
            tqdm.write(self.format(record))
            self.flush()
        except Exception:
            # Defer to the standard logging error-reporting path.
            self.handleError(record)
logger.addHandler(_TqdmLoggingHandler())
TSceneType = TypeVar("TSceneType", Scene, UnorderedScene)
@dataclass
class PipelineItem(Generic[TSceneType]):
    """One unit of work flowing through the encoding pipeline.

    Carries only lightweight identifiers (dataset/scene/sensor/frame names)
    and re-decodes the referenced model objects lazily via properties, so
    items stay cheap to pass between pipeline stages.
    """

    sensor_name: Optional[str]
    frame_id: Optional[str]
    scene_name: Optional[str]
    dataset_path: Union[str, AnyPath]
    dataset_format: str
    decoder_kwargs: Dict[str, Any]
    target_sensor_name: Optional[str]
    scene_reference_timestamp: Optional[datetime]
    # Free-form payload pipeline steps may use to hand data downstream.
    custom_data: Dict[str, Any] = field(default_factory=dict)
    # Sentinel flags marking scene/dataset boundaries in the item stream.
    is_end_of_scene: bool = False
    is_end_of_dataset: bool = False
    total_frames_in_scene: Optional[int] = -1
    total_scenes_in_dataset: Optional[int] = -1

    @property
    def dataset(self) -> Dataset:
        # Decoded on every access; presumably decode_dataset caches
        # internally - TODO confirm.
        return decode_dataset(dataset_format=self.dataset_format, **self.decoder_kwargs)

    @property
    @abstractmethod
    def scene(self) -> Optional[TSceneType]:
        """Return the referenced scene object, or None when scene_name is unset."""
        pass

    @property
    def sensor(self) -> Optional[Sensor]:
        # None when this item is not bound to a specific sensor.
        if self.sensor_name is not None:
            return self.scene.get_sensor(sensor_name=self.sensor_name)
        return None

    @property
    def sensor_frame(self) -> Optional[SensorFrame]:
        # None when this item is not bound to a specific frame.
        if self.frame_id is not None:
            return self.sensor.get_frame(frame_id=self.frame_id)
        return None

    @property
    def camera_frame(self) -> Optional[CameraSensorFrame]:
        # Only camera sensors yield a camera frame; other sensor types -> None.
        if self.sensor is not None and isinstance(self.sensor, CameraSensor):
            return self.sensor.get_frame(frame_id=self.frame_id)
        return None

    @property
    def lidar_frame(self) -> Optional[LidarSensorFrame]:
        # Only lidar sensors yield a lidar frame; other sensor types -> None.
        if self.sensor is not None and isinstance(self.sensor, LidarSensor):
            return self.sensor.get_frame(frame_id=self.frame_id)
        return None
@dataclass
class ScenePipelineItem(PipelineItem[Scene]):
    """Pipeline item bound to an ordered Scene."""

    @property
    def scene(self) -> Optional[Scene]:
        if self.scene_name is None:
            return None
        return self.dataset.get_scene(scene_name=self.scene_name)
@dataclass
class UnorderedScenePipelineItem(PipelineItem[UnorderedScene]):
    """Pipeline item bound to an UnorderedScene."""

    @property
    def scene(self) -> Optional[UnorderedScene]:
        if self.scene_name is None:
            return None
        return self.dataset.get_unordered_scene(scene_name=self.scene_name)
TPipelineItem = TypeVar("TPipelineItem", bound=PipelineItem)
DataType = Union[SensorDataTypes, Type[ClassMap]]
class EncodingFormat(Generic[TPipelineItem]):
    """Abstract sink that persists pipeline items in a concrete target format."""

    @abstractmethod
    def save_data(self, pipeline_item: TPipelineItem, data_type: DataType, data: Any):
        """Persist one piece of sensor data of the given type for this item."""
        pass

    @abstractmethod
    def supports_copy(self, pipeline_item: TPipelineItem, data_type: DataType, data_path: AnyPath):
        """Report whether data at data_path can be taken over without re-encoding."""
        pass

    @abstractmethod
    def save_sensor_frame(self, pipeline_item: TPipelineItem, data: Any = None):
        """Persist sensor-frame-level output for this item."""
        pass

    @abstractmethod
    def save_scene(self, pipeline_item: TPipelineItem, data: Any = None):
        """Persist scene-level output for this item."""
        pass

    @abstractmethod
    def save_dataset(self, pipeline_item: TPipelineItem, data: Any = None):
        """Persist dataset-level output for this item."""
        pass
class EncoderStep:
    """Abstract pipeline stage: transforms one stream of items into another."""

    @abstractmethod
    def apply(self, input_stage: Iterable[Any]) -> Iterable[Any]:
        """Wrap input_stage and return the transformed stream."""
        pass
class PipelineBuilder(Generic[TPipelineItem]):
    """Abstract factory supplying a pipeline's source stream and its stages."""

    @abstractmethod
    def build_encoder_steps(self, encoding_format: EncodingFormat[TPipelineItem]) -> List[EncoderStep]:
        """Return the ordered list of encoder steps to apply to the source."""
        pass

    @abstractmethod
    def build_pipeline_source_generator(self) -> Generator[TPipelineItem, None, None]:
        """Yield the initial stream of pipeline items."""
        pass

    @property
    @abstractmethod
    def pipeline_item_unit_name(self):
        # Unit label shown by the tqdm progress bar in encode_dataset().
        pass
class DatasetPipelineEncoder(Generic[TPipelineItem]):
    """Drives a full dataset encode: source generator -> encoder steps -> sink."""

    def __init__(
        self,
        pipeline_builder: PipelineBuilder[TPipelineItem],
        encoding_format: EncodingFormat[TPipelineItem],
        use_tqdm: bool = True,
    ):
        self.encoding_format = encoding_format
        self.pipeline_builder = pipeline_builder
        # Whether to wrap the final stage in a tqdm progress bar.
        self.use_tqdm = use_tqdm

    @staticmethod
    def build_pipeline(
        source_generator: Generator[TPipelineItem, None, None], encoder_steps: List[EncoderStep]
    ) -> Union[BaseStage[A], Iterable[A], Undefined]:
        """Chain each encoder step onto the source, in order; return the last stage."""
        stage = source_generator
        for encoder in encoder_steps:
            stage = encoder.apply(input_stage=stage)
        return stage

    def encode_dataset(self):
        """Build the pipeline and drain it to completion (all work is side effects)."""
        stage = self.pipeline_builder.build_pipeline_source_generator()
        encoder_steps = self.pipeline_builder.build_encoder_steps(encoding_format=self.encoding_format)
        stage = self.build_pipeline(source_generator=stage, encoder_steps=encoder_steps)
        stage = pypeln.thread.to_iterable(stage)
        if self.use_tqdm:
            stage = tqdm(
                stage,
                desc="Encoding Progress",
                unit=f" {self.pipeline_builder.pipeline_item_unit_name}",
                smoothing=0.0,
            )
        # Iterating the final stage is what actually executes the pipeline.
        for _ in stage:
            pass

    @classmethod
    def from_builder(
        cls,
        pipeline_builder: PipelineBuilder,
        encoding_format: EncodingFormat,
        use_tqdm: bool = True,
        **kwargs,
    ) -> "DatasetPipelineEncoder":
        """Alternate constructor; extra **kwargs are currently ignored."""
        return cls(use_tqdm=use_tqdm, pipeline_builder=pipeline_builder, encoding_format=encoding_format)
| {
"content_hash": "6e429a069d3baa158cd92db208be8a12",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 105,
"avg_line_length": 30.381642512077295,
"alnum_prop": 0.6751470822070281,
"repo_name": "parallel-domain/pd-sdk",
"id": "d570656242f748b55ef5247b57462774099a9dff",
"size": "6289",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "paralleldomain/encoding/pipeline_encoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1030434"
},
{
"name": "Shell",
"bytes": "1375"
}
],
"symlink_target": ""
} |
#! /usr/bin/python
"""
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Some assumptions:
1. the supported query is SSBM query
2. the fact table is the left child of the the join node and the dimension table is the right table
3. the size of the dimension table is small enough to be fit into GPU memory.
4. currently each thread will try to allocate its needed gpu memory before executing.
if it fails, it can wait until it gets the memory. There may be deadlocks here.
5. the current relation supported in where list is only AND, OR
6. the data to be grouped by and ordered by are small enough to be fit into gpu memory
7. dimension tables are not compressed.
"""
import sys
import commands
import os.path
import copy
import ystree
import correlation
import config
import pickle
schema = None
keepInGpu = 1
"""
Get the value of the configurable variables from config.py
"""
joinType = config.joinType
POS = config.POS
SOA = config.SOA
CODETYPE = config.CODETYPE
PID = config.PID
DTYPE = config.DTYPE
"""
Generate C style declaration of a given column.
The name of the variable is the lower cased column name.
"""
def column_to_variable(col):
    """Render one schema column as a C struct member declaration.

    INTEGER/DATE map to int, DECIMAL to float, and TEXT to a fixed-size
    char array whose length comes from column_others. The member name is
    the lower-cased column name; unknown types yield an empty string.
    """
    ctype = col.column_type
    if ctype in ("INTEGER", "DATE"):
        return "int " + col.column_name.lower() + ";"
    if ctype == "DECIMAL":
        return "float " + col.column_name.lower() + ";"
    if ctype == "TEXT":
        return "char " + col.column_name.lower() + "[" + str(col.column_others) + "];"
    return ""
"""
Generate schema.h file from the defined table schema.
"""
def generate_schema_file():
    """Emit schema.h: one C struct per table in the global schema dict."""
    global schema
    schema = ystree.global_table_dict

    fo = open("schema.h","w")
    print >>fo, "/* This file is generated by code_gen.py */"
    print >>fo, "#ifndef __SCHEMA_H__"
    print >>fo, "#define __SCHEMA_H__"
    for tn in schema.keys():
        print >>fo, "\tstruct " + tn.lower() + " {"
        for col in schema[tn].column_list:
            # One C member per column; type mapping lives in column_to_variable().
            print >>fo, "\t\t" + column_to_variable(col)
        print >>fo, "\t};\n"
    print >>fo, "#endif"
    fo.close()
"""
generate_soa generates a python script that will help transform
data from AOS to SOA. This is only for comparing the performance
of SOA with AOS.
"""
def generate_soa():
    """Emit soa.py: a script that runs ./soa over every TEXT column.

    Only used to convert column data from AOS to SOA layout for performance
    comparison (see the module comment above).
    """
    global schema
    schema = ystree.global_table_dict
    fo = open("soa.py","w")
    print >>fo, "#! /usr/bin/python"
    print >>fo, "import os\n"
    print >>fo, "cmd = \"\""
    for tn in schema.keys():
        attrLen = len(schema[tn].column_list)
        for i in range(0,attrLen):
            col = schema[tn].column_list[i]
            # Only TEXT columns need the transform; the column file name is
            # <table><columnIndex> and the second argument is the text width.
            if col.column_type == "TEXT":
                print >>fo, "cmd = \"./soa " + tn + str(i) + " " + str(col.column_others) + "\""
                print >>fo, "os.system(cmd)"
    fo.close()
    os.system("chmod +x ./soa.py")
"""
generate_loader will generate the load.c which will transform
the row-stored text raw data into column-stored binary data.
"""
def generate_loader():
    """Emit load.c: a row-to-column loader for every table in the schema.

    The generated C program reads '|'-delimited text rows and writes one
    binary column file per attribute. Each column file starts with a
    columnHeader and is split into blocks of at most BLOCKNUM tuples.
    """
    global schema
    schema = ystree.global_table_dict

    fo = open("load.c","w")

    # Fixed C prelude shared by every generated loader.
    print >>fo, "/* This file is generated by code_gen.py */"
    print >>fo, "#define _FILE_OFFSET_BITS 64"
    print >>fo, "#define _LARGEFILE_SOURCE"
    print >>fo, "#include <stdio.h>"
    print >>fo, "#include <stdlib.h>"
    print >>fo, "#include <error.h>"
    print >>fo, "#include <unistd.h>"
    print >>fo, "#include <string.h>"
    print >>fo, "#include <getopt.h>"
    print >>fo, "#include <linux/limits.h>"
    print >>fo, "#include \"../include/schema.h\""
    print >>fo, "#include \"../include/common.h\""
    print >>fo, "#define CHECK_POINTER(p) do {\\"
    print >>fo, "\tif(p == NULL){ \\"
    print >>fo, "\t\tperror(\"Failed to allocate host memory\"); \\"
    print >>fo, "\t\texit(-1); \\"
    print >>fo, "\t}} while(0)"
    print >>fo, "static char delimiter = '|';"

    # One C function per table: void <table>(FILE *fp, char *outName).
    for tn in schema.keys():
        attrLen = len(schema[tn].column_list)
        print >>fo, "void " + tn.lower() + " (FILE *fp, char *outName){\n"
        print >>fo, "\tstruct " + tn.lower() + " tmp;"
        print >>fo, "\tchar data [1024] = {0};"
        print >>fo, "\tchar buf[1024] = {0};"
        print >>fo, "\tint count = 0, i = 0,prev = 0;"
        print >>fo, "\tlong tupleCount =0, tupleRemain = 0, tupleUnit = 0;"
        print >>fo, "\tFILE * out[" + str(attrLen) + "];\n"
        # Open one output file per column: <outName><columnIndex>.
        print >>fo, "\tfor(i=0;i<" + str(attrLen) + ";i++){"
        print >>fo, "\t\tchar path[PATH_MAX] = {0};"
        print >>fo, "\t\tsprintf(path,\"%s%d\",outName,i);"
        print >>fo, "\t\tout[i] = fopen(path, \"w\");"
        print >>fo, "\t\tif(!out[i]){"
        print >>fo, "\t\t\tprintf(\"Failed to open %s\\n\",path);"
        print >>fo, "\t\t\texit(-1);"
        print >>fo, "\t\t}"
        print >>fo, "\t}\n"
        # First pass over the input counts tuples to size the block headers.
        print >>fo, "\tstruct columnHeader header;"
        print >>fo, "\tlong tupleNum = 0;"
        print >>fo, "\twhile(fgets(buf,sizeof(buf),fp) !=NULL)"
        print >>fo, "\t\ttupleNum ++;\n"
        print >>fo, "\theader.totalTupleNum = tupleNum;"
        print >>fo, "\ttupleRemain = tupleNum;"
        print >>fo, "\tif(tupleNum > BLOCKNUM)"
        print >>fo, "\t\ttupleUnit = BLOCKNUM;"
        print >>fo, "\telse"
        print >>fo, "\t\ttupleUnit = tupleNum;"
        print >>fo, "\theader.tupleNum = tupleUnit;"
        print >>fo, "\theader.format = UNCOMPRESSED;"
        print >>fo, "\theader.blockId = 0;"
        print >>fo, "\theader.blockTotal = (tupleNum + BLOCKNUM -1) / BLOCKNUM ;"
        print >>fo, "\tfseek(fp,0,SEEK_SET);"
        # Write the first block header of every column file; blockSize
        # depends on each column's C type width.
        for i in range(0,attrLen):
            col = schema[tn].column_list[i]
            if col.column_type == "INTEGER" or col.column_type == "DATE":
                print >>fo, "\theader.blockSize = header.tupleNum * sizeof(int);"
            elif col.column_type == "DECIMAL":
                print >>fo, "\theader.blockSize = header.tupleNum * sizeof(float);"
            elif col.column_type == "TEXT":
                print >>fo, "\theader.blockSize = header.tupleNum * " + str(col.column_others) + ";"
            print >>fo, "\tfwrite(&header, sizeof(struct columnHeader), 1, out[" + str(i) + "]);"
        # Second pass: parse each row and append the field values to their
        # column files, emitting a new block header at every block boundary.
        print >>fo, "\twhile(fgets(buf,sizeof(buf),fp)!= NULL){"
        print >>fo, "\t\tint writeHeader = 0;"
        print >>fo, "\t\ttupleCount ++;"
        print >>fo, "\t\tif(tupleCount > BLOCKNUM){"
        print >>fo, "\t\t\ttupleCount = 1;"
        print >>fo, "\t\t\ttupleRemain -= BLOCKNUM;"
        print >>fo, "\t\t\tif (tupleRemain > BLOCKNUM)"
        print >>fo, "\t\t\t\ttupleUnit = BLOCKNUM;"
        print >>fo, "\t\t\telse"
        print >>fo, "\t\t\t\ttupleUnit = tupleRemain;"
        print >>fo, "\t\t\theader.tupleNum = tupleUnit;"
        print >>fo, "\t\t\theader.blockId ++;"
        print >>fo, "\t\t\twriteHeader = 1;"
        print >>fo, "\t\t}"
        print >>fo, "\t\tfor(i = 0, prev = 0,count=0; buf[i] !='\\n';i++){"
        print >>fo, "\t\t\tif (buf[i] == delimiter){"
        print >>fo, "\t\t\t\tmemset(data,0,sizeof(data));"
        print >>fo, "\t\t\t\tstrncpy(data,buf+prev,i-prev);"
        print >>fo, "\t\t\t\tprev = i+1;"
        print >>fo, "\t\t\t\tswitch(count){"
        # One switch case per column; the last column is handled separately
        # below because it has no trailing delimiter.
        for i in range(0,attrLen):
            col = schema[tn].column_list[i]
            print >>fo, "\t\t\t\t\t case " + str(i) + ":"
            if col.column_type == "INTEGER" or col.column_type == "DATE":
                print >>fo, "\t\t\t\t\t\tif(writeHeader == 1){"
                print >>fo, "\t\t\t\t\t\t\theader.blockSize = header.tupleNum * sizeof(int);"
                print >>fo, "\t\t\t\t\t\t\tfwrite(&header,sizeof(struct columnHeader),1,out[" + str(i) + "]);"
                print >>fo, "\t\t\t\t\t\t}"
                print >>fo, "\t\t\t\t\t\ttmp."+str(col.column_name.lower()) + " = strtol(data,NULL,10);"
                print >>fo, "\t\t\t\t\t\tfwrite(&(tmp." + str(col.column_name.lower()) + "),sizeof(int),1,out["+str(i) + "]);"
            elif col.column_type == "DECIMAL":
                print >>fo, "\t\t\t\t\t\tif(writeHeader == 1){"
                print >>fo, "\t\t\t\t\t\t\theader.blockSize = header.tupleNum * sizeof(float);"
                print >>fo, "\t\t\t\t\t\t\tfwrite(&header,sizeof(struct columnHeader),1,out[" + str(i) + "]);"
                print >>fo, "\t\t\t\t\t\t}"
                print >>fo, "\t\t\t\t\t\ttmp."+str(col.column_name.lower()) + " = atof(data);"
                print >>fo, "\t\t\t\t\t\tfwrite(&(tmp." + str(col.column_name.lower()) + "),sizeof(float),1,out["+str(i) + "]);"
            elif col.column_type == "TEXT":
                print >>fo, "\t\t\t\t\t\tif(writeHeader == 1){"
                print >>fo, "\t\t\t\t\t\t\theader.blockSize = header.tupleNum * " + str(col.column_others) + ";"
                print >>fo, "\t\t\t\t\t\t\tfwrite(&header,sizeof(struct columnHeader),1,out[" + str(i) + "]);"
                print >>fo, "\t\t\t\t\t\t}"
                print >>fo, "\t\t\t\t\t\tstrcpy(tmp." + str(col.column_name.lower()) + ",data);"
                print >>fo, "\t\t\t\t\t\tfwrite(&(tmp." + str(col.column_name.lower()) + "),sizeof(tmp." +str(col.column_name.lower()) + "), 1, out[" + str(i) + "]);"
            print >>fo, "\t\t\t\t\t\tbreak;"
        print >>fo, "\t\t\t\t}"
        print >>fo, "\t\t\t\tcount++;"
        print >>fo, "\t\t\t}"
        print >>fo, "\t\t}"
        # Last column: runs from the final delimiter to end-of-line.
        print >>fo, "\t\tif(count == " + str(attrLen-1) + "){"
        col = schema[tn].column_list[attrLen-1]
        if col.column_type == "INTEGER" or col.column_type == "DATE":
            print >>fo, "\t\t\tif(writeHeader == 1){"
            print >>fo, "\t\t\t\theader.blockSize = header.tupleNum * sizeof(int);"
            print >>fo, "\t\t\t\tfwrite(&header,sizeof(struct columnHeader),1,out[" + str(attrLen-1) + "]);"
            print >>fo, "\t\t\t}"
            print >>fo, "\t\t\tmemset(data,0,sizeof(data));"
            print >>fo, "\t\t\tstrncpy(data,buf+prev,i-prev);"
            print >>fo, "\t\t\ttmp."+str(col.column_name.lower()) + " = strtol(data,NULL,10);"
            print >>fo, "\t\t\tfwrite(&(tmp." + str(col.column_name.lower()) + "),sizeof(int),1,out["+str(attrLen-1) + "]);"
        elif col.column_type == "DECIMAL":
            print >>fo, "\t\t\tif(writeHeader == 1){"
            print >>fo, "\t\t\t\theader.blockSize = header.tupleNum * sizeof(float);"
            print >>fo, "\t\t\t\tfwrite(&header,sizeof(struct columnHeader),1,out[" + str(attrLen-1) + "]);"
            print >>fo, "\t\t\t}"
            print >>fo, "\t\t\tmemset(data,0,sizeof(data));"
            print >>fo, "\t\t\tstrncpy(data,buf+prev,i-prev);"
            print >>fo, "\t\t\ttmp."+str(col.column_name.lower()) + " = atof(data);"
            # NOTE(review): this uses out[i] (loop leftover) rather than
            # out[attrLen-1]; i equals attrLen-1 here, so the value matches.
            print >>fo, "\t\t\tfwrite(&(tmp." + str(col.column_name.lower()) + "),sizeof(float),1,out["+str(i) + "]);"
        elif col.column_type == "TEXT":
            print >>fo, "\t\t\tif(writeHeader == 1){"
            print >>fo, "\t\t\t\theader.blockSize = header.tupleNum * " + str(col.column_others) + ";"
            print >>fo, "\t\t\t\tfwrite(&header,sizeof(struct columnHeader),1,out[" + str(attrLen-1) + "]);"
            print >>fo, "\t\t\t}"
            print >>fo, "\t\t\tstrncpy(tmp." + str(col.column_name.lower()) + ",buf+prev,i-prev);"
            print >>fo, "\t\t\tfwrite(&(tmp." + str(col.column_name.lower()) + "),sizeof(tmp." +str(col.column_name.lower()) + "), 1, out[" + str(attrLen-1) + "]);"
        print >>fo, "\t\t}"
        print >>fo, "\t}\n" ### end of reading from input file
        print >>fo, "\tfor(i=0;i<" + str(attrLen) + ";i++){"
        print >>fo, "\t\tfclose(out[i]);"
        print >>fo, "\t}"
        print >>fo, "\n}\n"

    # Generated main(): getopt_long dispatch that runs the per-table loader
    # for every table passed on the command line.
    print >>fo, "int main(int argc, char ** argv){\n"
    print >>fo, "\tFILE * in = NULL, *out = NULL;"
    print >>fo, "\tint table;"
    print >>fo, "\tint setPath = 0;"
    print >>fo, "\tchar path[PATH_MAX];"
    print >>fo, "\tchar cwd[PATH_MAX];"
    print >>fo, "\t"
    print >>fo, "\tint long_index;"
    print >>fo, "\tstruct option long_options[] = {"
    for i in range(0, len(schema.keys())):
        print >>fo, "\t\t{\"" + schema.keys()[i].lower()+ "\",required_argument,0,'" + str(i) + "'},"
    # These two rely on i keeping its final loop value (len(tables)-1).
    print >>fo, "\t\t{\"delimiter\",required_argument,0,'" +str(i+1) + "'},"
    print >>fo, "\t\t{\"datadir\",required_argument,0,'" +str(i+2) + "'}"
    print >>fo, "\t};\n"
    # First option pass only picks up --datadir.
    # NOTE(review): the '6' here is hard-coded while the datadir option char
    # is computed as str(i+2); they only agree for a 5-table schema - confirm.
    print >>fo, "\twhile((table=getopt_long(argc,argv,\"\",long_options,&long_index))!=-1){"
    print >>fo, "\t\tswitch(table){"
    print >>fo, "\t\t\tcase '6':"
    print >>fo, "\t\t\t\tsetPath = 1;"
    print >>fo, "\t\t\t\tstrcpy(path,optarg);"
    print >>fo, "\t\t\t\tbreak;"
    print >>fo, "\t\t}"
    print >>fo, "\t}\n"
    print >>fo, "\toptind=1;\n"
    print >>fo, "\tgetcwd(cwd,PATH_MAX);"
    # Second option pass loads each requested table (chdir into datadir if set).
    print >>fo, "\twhile((table=getopt_long(argc,argv,\"\",long_options,&long_index))!=-1){"
    print >>fo, "\t\tswitch(table){"
    for i in range(0, len(schema.keys())):
        print >>fo, "\t\t\tcase '" + str(i) + "':"
        print >>fo, "\t\t\t\tin = fopen(optarg,\"r\");"
        print >>fo, "\t\t\t\tif(!in){"
        print >>fo, "\t\t\t\t\tprintf(\"Failed to open %s\\n\",optarg);"
        print >>fo, "\t\t\t\t\texit(-1);"
        print >>fo, "\t\t\t\t}"
        print >>fo, "\t\t\t\tif (setPath == 1){"
        print >>fo, "\t\t\t\t\tchdir(path);"
        print >>fo, "\t\t\t\t}"
        print >>fo, "\t\t\t\t" + schema.keys()[i].lower() + "(in,\"" + schema.keys()[i] + "\");"
        print >>fo, "\t\t\t\tif (setPath == 1){"
        print >>fo, "\t\t\t\t\tchdir(cwd);"
        print >>fo, "\t\t\t\t}"
        print >>fo, "\t\t\t\tfclose(in);"
        print >>fo, "\t\t\t\tbreak;"
    # Delimiter case again relies on i's final loop value.
    print >>fo, "\t\t\tcase '" + str(i+1) + "':"
    print >>fo, "\t\t\t\tdelimiter = optarg[0];"
    print >>fo, "\t\t\t\tbreak;"
    print >>fo, "\t\t}"
    print >>fo, "\t}\n"
    print >>fo, "\treturn 0;"
    print >>fo, "}\n"
    fo.close()
class columnAttr(object):
    """Lightweight (type, size) descriptor for a projected column."""

    type = None
    size = None

    def __init__(self):
        # Fresh instances start with an empty type tag and zero width.
        self.type = ""
        self.size = 0
class JoinTranslation(object):
    """Per-query bookkeeping for translating a chain of star-schema joins.

    All fields are per-instance lists, populated innermost-join-first by
    get_tables().
    """

    dimTables = None
    factTables = None
    joinNode = None
    dimIndex = None
    factIndex = None
    outIndex = None
    outAttr = None
    outPos = None

    def __init__(self):
        # Give every field its own fresh list (never share the class-level None).
        for attr in ("dimTables", "factTables", "joinNode", "dimIndex",
                     "factIndex", "outIndex", "outAttr", "outPos"):
            setattr(self, attr, [])
def __get_gb_exp__(exp, tmp_list):
    """Recursively collect aggregate-function sub-expressions into tmp_list."""
    if not isinstance(exp, ystree.YFuncExp):
        return
    if exp.func_name in ("SUM", "AVG", "COUNT", "MAX", "MIN"):
        # An aggregate itself: record it and stop descending.
        tmp_list.append(exp)
        return
    for child in exp.parameter_list:
        __get_gb_exp__(child, tmp_list)
def get_gbexp_list(exp_list, gb_exp_list):
    """Append each distinct aggregate expression found in exp_list.

    Distinctness is decided by YFuncExp.compare(); duplicates already in
    gb_exp_list are skipped.
    """
    for exp in exp_list:
        if not isinstance(exp, ystree.YFuncExp):
            continue
        found = []
        __get_gb_exp__(exp, found)
        for cand in found:
            # Only keep candidates not already recorded.
            if not any(cand.compare(seen) is True for seen in gb_exp_list):
                gb_exp_list.append(cand)
"""
get_tables() gets the translation information for join, agg and order by nodes.
Currently we only support star schema queries.
We assume that the dimTable is always the right child of the join node.
"""
def get_tables(tree, joinAttr, aggNode, orderbyNode):
    """Walk the plan tree top-down, filling joinAttr/aggNode/orderbyNode.

    Assumes a star-schema (SSBM-style) plan per the module comment: the
    fact table is always the left child of a join and the dimension table
    the right child. joinType (0 or 1) selects how join-key columns are
    resolved; 1 traces columns back to the leaf via __trace_to_leaf__.
    """
    if isinstance(tree, ystree.TableNode):
        # Leaf on the left spine: this is the fact table.
        joinAttr.factTables.append(tree)
        return
    elif isinstance(tree, ystree.OrderByNode):
        obNode = copy.deepcopy(tree)
        orderbyNode.append(obNode)
        get_tables(tree.child, joinAttr, aggNode, orderbyNode)
    elif isinstance(tree, ystree.GroupByNode):
        gbNode = copy.deepcopy(tree)
        aggNode.append(gbNode)
        get_tables(tree.child, joinAttr, aggNode, orderbyNode)
    elif isinstance(tree, ystree.TwoJoinNode):
        leftIndex = []
        rightIndex = []
        leftAttr = []
        rightAttr = []
        leftPos = []
        rightPos = []

        newNode = copy.deepcopy(tree)
        # Joins are visited outermost-first; inserting at the front keeps
        # the collected lists ordered innermost join first.
        joinAttr.joinNode.insert(0,newNode)

        # Split the join's select list into left(fact)/right(dim) columns,
        # remembering each column's type and its position in the output row.
        for exp in tree.select_list.tmp_exp_list:
            index = tree.select_list.tmp_exp_list.index(exp)
            if isinstance(exp,ystree.YRawColExp):
                colAttr = columnAttr()
                colAttr.type = exp.column_type
                if exp.table_name == "LEFT":
                    if joinType == 0:
                        leftIndex.append(exp.column_name)
                    elif joinType == 1:
                        # Resolve the column back to its leaf-table origin.
                        newExp = ystree.__trace_to_leaf__(tree,exp,False)
                        leftIndex.append(newExp.column_name)
                    leftAttr.append(colAttr)
                    leftPos.append(index)
                elif exp.table_name == "RIGHT":
                    if joinType == 0:
                        rightIndex.append(exp.column_name)
                    elif joinType == 1:
                        newExp = ystree.__trace_to_leaf__(tree,exp,False)
                        rightIndex.append(newExp.column_name)
                    rightAttr.append(colAttr)
                    rightPos.append(index)

        # Pack as [left, right] pairs, innermost join first.
        outList= []
        outList.append(leftIndex)
        outList.append(rightIndex)
        outAttr = []
        outAttr.append(leftAttr)
        outAttr.append(rightAttr)
        outPos = []
        outPos.append(leftPos)
        outPos.append(rightPos)

        joinAttr.outIndex.insert(0,outList)
        joinAttr.outAttr.insert(0, outAttr)
        joinAttr.outPos.insert(0, outPos)

        # pkList[0] = fact-side join keys, pkList[1] = dim-side join keys.
        pkList = tree.get_pk()
        if (len(pkList[0]) != len(pkList[1])):
            # Deliberate crash (ZeroDivisionError): malformed join keys.
            print 1/0

        if joinType == 0:
            for exp in pkList[0]:
                colIndex = 0
                if isinstance(tree.left_child, ystree.TableNode):
                    colIndex = -1
                    # Find the key's position in the child's select list.
                    for tmp in tree.left_child.select_list.tmp_exp_list:
                        if exp.column_name == tmp.column_name:
                            colIndex = tree.left_child.select_list.tmp_exp_list.index(tmp)
                            break
                    if colIndex == -1:
                        # Deliberate crash: key missing from child select list.
                        print 1/0
                else:
                    colIndex = exp.column_name
        elif joinType == 1:
            for exp in pkList[0]:
                newExp = ystree.__trace_to_leaf__(tree,exp,True)
                colIndex = newExp.column_name
        # NOTE(review): only the last colIndex survives the loops above -
        # presumably each join has exactly one key pair; confirm.
        joinAttr.factIndex.insert(0, colIndex)

        for exp in pkList[1]:
            colIndex = 0
            if isinstance(tree.right_child, ystree.TableNode):
                colIndex = -1
                for tmp in tree.right_child.select_list.tmp_exp_list:
                    if exp.column_name == tmp.column_name:
                        colIndex = tree.right_child.select_list.tmp_exp_list.index(tmp)
                        break
                if colIndex == -1:
                    print 1/0
            else:
                colIndex = exp.column_name
        joinAttr.dimIndex.insert(0, colIndex)

        # The right child of a join is the dimension table (module assumption).
        if isinstance(tree.right_child, ystree.TableNode):
            joinAttr.dimTables.insert(0, tree.right_child)
        get_tables(tree.left_child, joinAttr, aggNode, orderbyNode)
"""
Translate the type defined in the schema into the supported
type in the translated c program.
Currently only three types are supported:
INT, FLOAT and STRING.
"""
def to_ctype(colType):
    """Map a schema column type to the C type name used in generated code.

    Supported schema types are INTEGER/DATE (-> INT), TEXT (-> STRING)
    and DECIMAL (-> FLOAT). Any other type yields None, matching the
    original fall-through behavior.
    """
    ctype_by_schema_type = {
        "INTEGER": "INT",
        "DATE": "INT",
        "TEXT": "STRING",
        "DECIMAL": "FLOAT",
    }
    return ctype_by_schema_type.get(colType)
"""
Get the length of a given column.
"""
def type_length(tn, colIndex, colType):
    """Return a C expression (as a string) for the byte length of a column.

    tn       -- table name, used to look the column up in the global schema
    colIndex -- index of the column within the table's column list
    colType  -- schema type of the column

    Fixed-width types map to sizeof() expressions; for TEXT the declared
    length is read from the schema's column_others field. Unknown types
    fall through and yield None, as in the original.
    """
    if colType in ("INTEGER", "DATE"):
        return "sizeof(int)"
    if colType == "DECIMAL":
        return "sizeof(float)"
    if colType == "TEXT":
        # TEXT columns carry their declared length in column_others.
        return str(schema[tn].column_list[colIndex].column_others)
"""
Get the exp information from the where expression.
"""
def get_where_attr(exp, whereList, relList, conList):
    """Walk a where-condition expression tree and collect its parts.

    Appends, in traversal order:
      whereList -- every column expression (YRawColExp) encountered
      relList   -- the comparison operator name of each leaf predicate
      conList   -- the constant value of each YConsExp operand

    AND/OR nodes are descended into; any other function node is treated
    as a leaf predicate whose operands are columns and constants.
    """
    if isinstance(exp, ystree.YRawColExp):
        whereList.append(exp)
        return
    if not isinstance(exp, ystree.YFuncExp):
        return
    if exp.func_name in ("AND", "OR"):
        for child in exp.parameter_list:
            if isinstance(child, ystree.YFuncExp):
                get_where_attr(child, whereList, relList, conList)
            elif isinstance(child, ystree.YRawColExp):
                whereList.append(child)
    else:
        # Leaf predicate: record the operator, then its operands.
        relList.append(exp.func_name)
        for child in exp.parameter_list:
            if isinstance(child, ystree.YRawColExp):
                whereList.append(child)
            elif isinstance(child, ystree.YConsExp):
                conList.append(child.cons_value)
"""
Generate a new where list in which no duplicate columns exist.
Return the number of columns in the new list.
"""
def count_whereList(wlist, tlist):
    """Copy wlist into tlist, dropping duplicates; return len(tlist).

    Two columns are duplicates when one's compare() method returns True
    for the other. tlist is mutated in place (it normally starts empty).
    """
    for candidate in wlist:
        already_present = any(
            existing.compare(candidate) is True for existing in tlist
        )
        if not already_present:
            tlist.append(candidate)
    return len(tlist)
"""
Count the nesting level of the where condition.
"""
def count_whereNested(exp):
    """Return the nesting depth of AND/OR nodes below this where expression.

    A child that is itself an AND/OR function node contributes 1 plus its
    own nesting depth; the maximum over all children is returned. A non-
    function node (or a non-AND/OR function) has depth 0. Callers use a
    non-zero result to reject where clauses that are too complicated.
    """
    count = 0
    if isinstance(exp, ystree.YFuncExp):
        if exp.func_name in ["AND", "OR"]:
            for x in exp.parameter_list:
                # 'depth' replaces the original local named 'max', which
                # shadowed the builtin.
                depth = 0
                if isinstance(x, ystree.YFuncExp) and x.func_name in ["AND", "OR"]:
                    depth += 1
                    # Bug fix: the original called the undefined name
                    # count_whereNest(), raising NameError whenever a
                    # nested AND/OR was actually present.
                    depth += count_whereNested(x)
                if depth > count:
                    count = depth
    return count
class mathExp:
    """In-memory mirror of the generated C 'struct mathExp'.

    A node is either a leaf (opName "COLUMN" or "CONS" with a value) or
    an interior arithmetic node (opName is the operator, with left/right
    children). Built from ystree expression nodes via addOp().
    """

    # Class-level defaults kept for compatibility with attribute access
    # on the class itself.
    opName = None
    leftOp = None
    rightOp = None
    value = None

    def __init__(self):
        self.opName = None
        self.leftOp = None
        self.rightOp = None
        self.value = None

    def addOp(self, exp):
        """Populate this node (recursively) from a ystree expression."""
        if isinstance(exp, ystree.YRawColExp):
            self.opName, self.value = "COLUMN", exp.column_name
        elif isinstance(exp, ystree.YConsExp):
            self.opName, self.value = "CONS", exp.cons_value
        elif isinstance(exp, ystree.YFuncExp):
            # Binary operator: build both children from the first two
            # parameters (only binary math functions are expected here).
            self.opName = exp.func_name
            self.leftOp, self.rightOp = mathExp(), mathExp()
            self.leftOp.addOp(exp.parameter_list[0])
            self.rightOp.addOp(exp.parameter_list[1])
### print the mathExp in c
def printMathFunc(fo, prefix, mathFunc):
    """Emit C statements initializing the struct mathExp named by 'prefix'.

    Leaf nodes (opName "COLUMN" or "CONS") become a NOOP single-operand
    expression whose opType matches the opName; interior nodes allocate
    a two-element child array and recurse into it. Output is identical
    to the original 'print >>fo' form (one statement per line).
    """
    if mathFunc.opName in ("COLUMN", "CONS"):
        # Leaf: a column reference or a constant value.
        fo.write(prefix + ".op = NOOP;\n")
        fo.write(prefix + ".opNum = 1;\n")
        fo.write(prefix + ".exp = 0;\n")
        fo.write(prefix + ".opType = " + mathFunc.opName + ";\n")
        fo.write(prefix + ".opValue = " + str(mathFunc.value) + ";\n")
    else:
        # Interior node: allocate both children, then emit them.
        fo.write(prefix + ".op = " + mathFunc.opName + ";\n")
        fo.write(prefix + ".opNum = 2;\n")
        fo.write(prefix + ".exp = (long) malloc(sizeof(struct mathExp) * 2);\n")
        printMathFunc(fo, "((struct mathExp *)" + prefix + ".exp)[0]", mathFunc.leftOp)
        printMathFunc(fo, "((struct mathExp *)" + prefix + ".exp)[1]", mathFunc.rightOp)
"""
generate_col_list gets all the columns that will be scannned for a given table node.
@indexList stores the index of each column.
@colList stores the columnExp for each column.
"""
def generate_col_list(tn, indexList, colList):
    """Collect every column a table node must scan.

    Appends to indexList (column indexes) and colList (column expressions)
    each distinct column referenced by tn's select list, then by its where
    condition (if any). Both lists are mutated in place; duplicates are
    suppressed by column_name.
    """
    def _add(col):
        # Record the column once, keyed by its index/name.
        if col.column_name not in indexList:
            indexList.append(col.column_name)
            colList.append(col)

    for col in tn.select_list.tmp_exp_list:
        _add(col)

    if tn.where_condition is not None:
        whereCols = []
        rels = []
        cons = []
        get_where_attr(tn.where_condition.where_condition_exp, whereCols, rels, cons)
        for col in whereCols:
            _add(col)
"""
generate_code generates CUDA/OpenCL codes from the query plan tree.
Currently we only generate CUDA/OpenCL codes for star schema queries.
Several configurable variables (in config.py):
@CODETYPE determines whether CUDA or OpenCL codes should be generated.
0 represents CUDA and 1 represents OpenCL.
@joinType determines whether we should generate invisible joins for star
schema queries. 0 represents normal join and 1 represents invisible join.
@POS describes where the data are stored in the host memory and how the
codes should be generated. 0 means data are stored in pageable host
memory and data are explicitly transferred. 1 means data are stored in
pinned host memory and data are explicitly transferred. 2 means data are
stored in pinned host memory and the kernel will directly access the data
without explicit data transferring. 3 means data are stored in disk and only
mapped to host memory.
"""
def generate_code(tree):
"""
First check whether the value of each configurable variable is valid.
All should be integers.
"""
if CODETYPE not in [0,1]:
print "Error! The value of CODETYPE can only be 0 or 1."
exit(-1)
if POS not in [0,1,2,3,4]:
print "Error! The value of POS can only be 0,1,2,3,4."
exit(-1)
if joinType not in [0,1]:
print "Error! The value of JOINTYPE can only be 0 or 1."
exit(-1)
DTYPE_STR = ""
if CODETYPE == 1:
if PID not in [0,1,2,3]:
print "Error for PID!"
exit(-1)
if DTYPE not in [0,1,2]:
print "Error! The value of DTYPE can only be 0,1,2."
exit(-1)
if DTYPE == 0:
DTYPE_STR = "CL_DEVICE_TYPE_GPU"
elif DTYPE == 1:
DTYPE_STR = "CL_DEVICE_TYPE_CPU"
elif DTYPE == 2:
DTYPE_STR = "CL_DEVICE_TYPE_ACCELERATOR"
if CODETYPE==0:
fo = open("driver.cu","w")
else:
fo = open("driver.cpp","w")
print >>fo, "/* This file is generated by code_gen.py */"
print >>fo, "#include <stdio.h>"
print >>fo, "#include <stdlib.h>"
print >>fo, "#include <sys/types.h>"
print >>fo, "#include <sys/stat.h>"
print >>fo, "#include <fcntl.h>"
print >>fo, "#include <sys/mman.h>"
print >>fo, "#include <string.h>"
print >>fo, "#include <unistd.h>"
print >>fo, "#include <malloc.h>"
print >>fo, "#include <time.h>"
print >>fo, "#include <getopt.h>"
print >>fo, "#include <linux/limits.h>"
print >>fo, "#include \"../include/common.h\""
if joinType == 0:
print >>fo, "#include \"../include/hashJoin.h\""
else:
print >>fo, "#include \"../include/inviJoin.h\""
print >>fo, "#include \"../include/schema.h\""
if CODETYPE == 0:
print >>fo, "#include \"../include/cpuCudaLib.h\""
print >>fo, "#include \"../include/gpuCudaLib.h\""
print >>fo, "extern struct tableNode* tableScan(struct scanNode *,struct statistic *);"
if joinType == 0:
print >>fo, "extern struct tableNode* hashJoin(struct joinNode *, struct statistic *);"
else:
print >>fo, "extern struct tableNode* inviJoin(struct joinNode *, struct statistic *);"
print >>fo, "extern struct tableNode* groupBy(struct groupByNode *,struct statistic *);"
print >>fo, "extern struct tableNode* orderBy(struct orderByNode *, struct statistic *);"
print >>fo, "extern char* materializeCol(struct materializeNode * mn, struct statistic *);"
else:
print >>fo, "#include <CL/cl.h>"
print >>fo, "#include <string>"
print >>fo, "#include \"../include/gpuOpenclLib.h\"\n"
print >>fo, "#include\"../include/cpuOpenclLib.h\""
print >>fo, "using namespace std;"
print >>fo, "extern const char * createProgram(string, int *);"
print >>fo, "extern struct tableNode* tableScan(struct scanNode *,struct clContext *, struct statistic *);"
if joinType == 0:
print >>fo, "extern struct tableNode* hashJoin(struct joinNode *, struct clContext *, struct statistic *);"
else:
print >>fo, "extern struct tableNode* inviJoin(struct joinNode *, struct clContext *, struct statistic *);"
print >>fo, "extern struct tableNode* groupBy(struct groupByNode *, struct clContext *, struct statistic *);"
print >>fo, "extern struct tableNode* orderBy(struct orderByNode *, struct clContext *, struct statistic *);"
print >>fo, "extern char * materializeCol(struct materializeNode * mn, struct clContext *, struct statistic *);"
print >>fo, "#define CHECK_POINTER(p) do {\\"
print >>fo, "\tif(p == NULL){ \\"
print >>fo, "\t\tperror(\"Failed to allocate host memory\"); \\"
print >>fo, "\t\texit(-1); \\"
print >>fo, "\t}} while(0)"
print >>fo, "int main(int argc, char ** argv){\n"
if CODETYPE == 1:
print >>fo, "\tint psc = 0;"
print >>fo, "\tvoid * clTmp;"
print >>fo, "\tconst char * ps = createProgram(\"kernel.cl\",&psc);"
print >>fo, "\tstruct clContext context;"
print >>fo, "\tcl_uint numP;"
print >>fo, "\tcl_int error = 0;"
print >>fo, "\tcl_device_id device;"
print >>fo, "\tclGetPlatformIDs(0,NULL,&numP);"
print >>fo, "\tcl_platform_id * pid = new cl_platform_id[numP];"
print >>fo, "\tclGetPlatformIDs(numP, pid, NULL);"
print >>fo, "\tclGetDeviceIDs(pid[" + str(PID) + "]," + DTYPE_STR +",1,&device,NULL);"
print >>fo, "\tcontext.context = clCreateContext(0,1,&device,NULL,NULL,&error);"
print >>fo, "\tcl_command_queue_properties prop = 0;"
print >>fo, "\tprop |= CL_QUEUE_PROFILING_ENABLE;"
print >>fo, "\tcontext.queue = clCreateCommandQueue(context.context, device, prop, &error);"
print >>fo, "\tcontext.program = clCreateProgramWithSource(context.context, psc, (const char **)&ps, 0, &error);"
print >>fo, "\terror = clBuildProgram(context.program, 0, 0 , \"-I .\" , 0, 0);\n"
else:
print >>fo, "/* For initializing CUDA device */"
print >>fo, "\tint * cudaTmp;"
print >>fo, "\tcudaMalloc((void**)&cudaTmp,sizeof(int));"
print >>fo, "\tcudaFree(cudaTmp);\n"
print >>fo, "\tint table;"
print >>fo, "\tint long_index;"
print >>fo, "\tchar path[PATH_MAX];"
print >>fo, "\tint setPath = 0;"
print >>fo, "\tstruct option long_options[] = {"
print >>fo, "\t\t{\"datadir\",required_argument,0,'0'}"
print >>fo, "\t};\n"
print >>fo, "\twhile((table=getopt_long(argc,argv,\"\",long_options,&long_index))!=-1){"
print >>fo, "\t\tswitch(table){"
print >>fo, "\t\t\tcase '0':"
print >>fo, "\t\t\t\tsetPath = 1;"
print >>fo, "\t\t\t\tstrcpy(path,optarg);"
print >>fo, "\t\t\t\tbreak;"
print >>fo, "\t\t}"
print >>fo, "\t}\n"
print >>fo, "\tif(setPath == 1)"
print >>fo, "\t\tchdir(path);\n"
print >>fo, "\tstruct timespec start,end;"
print >>fo, "\tstruct timespec diskStart, diskEnd;"
print >>fo, "\tdouble diskTotal = 0;"
print >>fo, "\tclock_gettime(CLOCK_REALTIME,&start);"
print >>fo, "\tstruct statistic pp;"
print >>fo, "\tpp.total = pp.kernel = pp.pcie = 0;"
resultNode = "result"
joinAttr = JoinTranslation()
aggNode = []
orderbyNode = []
get_tables(tree, joinAttr,aggNode, orderbyNode)
print >>fo, "\tstruct tableNode *" + resultNode + " = (struct tableNode*) malloc(sizeof(struct tableNode));"
print >>fo, "\tCHECK_POINTER("+resultNode+");"
print >>fo, "\tinitTable("+resultNode +");"
"""
Scan all the dimension tables first.
"""
for tn in joinAttr.dimTables:
print >>fo, "\tstruct tableNode *" + tn.table_name.lower() +"Table;"
print >>fo, "\tint outFd;"
print >>fo, "\tlong outSize;"
print >>fo, "\tchar *outTable;"
print >>fo, "\tlong offset, tupleOffset;"
print >>fo, "\tint blockTotal;"
print >>fo, "\tstruct columnHeader header;\n"
for tn in joinAttr.dimTables:
resName = tn.table_name.lower() + "Res"
tnName = tn.table_name.lower() + "Table"
indexList = []
colList = []
generate_col_list(tn,indexList,colList)
totalAttr = len(indexList)
setTupleNum = 0
tupleSize = "0"
selectList = tn.select_list.tmp_exp_list
for i in range(0,totalAttr):
col = colList[i]
ctype = to_ctype(col.column_type)
colIndex = int(col.column_name)
colLen = type_length(tn.table_name, colIndex, col.column_type)
tupleSize += " + " + colLen
print >>fo, "\toutFd = open(\""+tn.table_name+str(colIndex)+"\",O_RDONLY);"
print >>fo, "\tread(outFd,&header, sizeof(struct columnHeader));"
if setTupleNum == 0:
setTupleNum = 1
print >>fo, "\tblockTotal = header.blockTotal;"
print >>fo, "\tclose(outFd);"
break
print >>fo, "\toffset=0;"
print >>fo, "\ttupleOffset=0;"
print >>fo, "\tstruct tableNode *" + resName + " = (struct tableNode *)malloc(sizeof(struct tableNode));"
print >>fo, "\tCHECK_POINTER("+ resName + ");"
print >>fo, "\tinitTable("+resName +");"
print >>fo, "\tfor(int i=0;i<blockTotal;i++){"
print >>fo, "\t\t" + tnName+" = (struct tableNode *) malloc(sizeof(struct tableNode));"
print >>fo, "\t\tCHECK_POINTER(" + tnName + ");"
print >>fo, "\t\t" + tnName+"->totalAttr = " + str(totalAttr) + ";"
print >>fo, "\t\t" + tnName+"->attrType = (int *) malloc(sizeof(int)*"+str(totalAttr)+");"
print >>fo, "\t\tCHECK_POINTER(" + tnName + "->attrType);"
print >>fo, "\t\t" + tnName+"->attrSize = (int *) malloc(sizeof(int)*"+str(totalAttr)+");"
print >>fo, "\t\tCHECK_POINTER(" + tnName + "->attrSize);"
print >>fo, "\t\t" + tnName+"->attrIndex = (int *) malloc(sizeof(int)*"+str(totalAttr)+");"
print >>fo, "\t\tCHECK_POINTER(" + tnName + "->attrIndex);"
print >>fo, "\t\t" + tnName+"->attrTotalSize = (int *) malloc(sizeof(int)*"+str(totalAttr)+");"
print >>fo, "\t\tCHECK_POINTER(" + tnName + "->attrTotalSize);"
print >>fo, "\t\t" + tnName+"->dataPos = (int *) malloc(sizeof(int)*"+str(totalAttr)+");"
print >>fo, "\t\tCHECK_POINTER(" + tnName + "->dataPos);"
print >>fo, "\t\t" + tnName+"->dataFormat = (int *) malloc(sizeof(int)*"+str(totalAttr)+");"
print >>fo, "\t\tCHECK_POINTER(" + tnName + "->dataFormat);"
print >>fo, "\t\t" + tnName+"->content = (char **) malloc(sizeof(char *)*"+str(totalAttr)+");"
print >>fo, "\t\tCHECK_POINTER(" + tnName + "->content);"
for i in range(0,totalAttr):
col = colList[i]
ctype = to_ctype(col.column_type)
colIndex = int(col.column_name)
colLen = type_length(tn.table_name, colIndex, col.column_type)
tupleSize += " + " + colLen
print >>fo, "\t\t" + tnName+"->attrSize["+str(i) + "] = "+ colLen + ";"
print >>fo, "\t\t" + tnName+"->attrIndex["+str(i) + "] = "+ str(colIndex) + ";"
print >>fo, "\t\t" + tnName+"->attrType[" + str(i) + "] = " + ctype + ";"
if POS == 0:
print >>fo, "\t\t" + tnName+"->dataPos[" + str(i) + "] = MEM;"
elif POS == 1:
print >>fo, "\t\t" + tnName+"->dataPos[" + str(i) + "] = PINNED;"
elif POS == 2:
print >>fo, "\t\t" + tnName+"->dataPos[" + str(i) + "] = UVA;"
elif POS == 3:
print >>fo, "\t\t" + tnName+"->dataPos[" + str(i) + "] = MMAP;"
elif POS == 4:
print >>fo, "\t\t" + tnName+"->dataPos[" + str(i) + "] = GPU;"
else:
print >>fo, "\t\t" + tnName+"->dataPos[" + str(i) + "] = MEM;"
print >>fo, "\t\toutFd = open(\""+tn.table_name+str(colIndex)+"\",O_RDONLY);"
print >>fo, "\t\toffset = i * sizeof(struct columnHeader) + tupleOffset *" + str(colLen) + ";"
print >>fo, "\t\tlseek(outFd,offset,SEEK_SET);"
print >>fo, "\t\tread(outFd,&header, sizeof(struct columnHeader));"
print >>fo, "\t\toffset += sizeof(struct columnHeader);"
print >>fo, "\t\t" + tnName + "->dataFormat[" + str(i) + "] = header.format;"
print >>fo, "\t\toutSize = header.tupleNum * " + colLen + ";"
print >>fo, "\t\t" + tnName + "->attrTotalSize[" + str(i) + "] = outSize;"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\toutTable =(char *) mmap(0,outSize,PROT_READ,MAP_SHARED,outFd,offset);\n"
if CODETYPE == 0:
if POS == 1:
print >>fo, "\t\tCUDA_SAFE_CALL_NO_SYNC(cudaMallocHost((void **)&" + tnName+"->content["+str(i)+"],outSize));"
print >>fo, "\t\tmemcpy("+tnName+"->content["+str(i)+"],outTable,outSize);"
elif POS == 2:
print >>fo, "\t\tCUDA_SAFE_CALL_NO_SYNC(cudaMallocHost((void **)&" + tnName+"->content["+str(i)+"],outSize));"
print >>fo, "\t\tmemcpy("+tnName+"->content["+str(i)+"],outTable,outSize);"
elif POS == 3:
print >>fo, "\t\t"+tnName+"->content["+str(i)+"] = (char *)mmap(0,outSize,PROT_READ,MAP_SHARED,outFd,offset);"
elif POS == 4:
print >>fo, "\t\tCUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&" + tnName+"->content["+str(i)+"],outSize));"
print >>fo, "\t\tcudaMemcpy("+tnName+"->content["+str(i)+"],outTable,outSize, cudaMemcpyHostToDevice);"
else:
print >>fo, "\t\t"+tnName+"->content["+str(i)+"] = (char *)memalign(256,outSize);"
print >>fo, "\t\tmemcpy("+tnName+"->content["+str(i)+"],outTable,outSize);"
else:
if POS == 0:
print >>fo, "\t\t"+tnName+"->content["+str(i)+"] = (char *)memalign(256,outSize);"
print >>fo, "\t\tmemcpy("+tnName+"->content["+str(i)+"],outTable,outSize);"
elif POS == 3:
print >>fo, "\t\t"+tnName+"->content["+str(i)+"] = (char *)mmap(0,outSize,PROT_READ,MAP_SHARED,outFd,offset);"
else:
print >>fo, "\t\t"+tnName+"->content["+str(i)+"] = (char *)clCreateBuffer(context.context,CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR,outSize,NULL,0);"
print >>fo, "\t\tclTmp = clEnqueueMapBuffer(context.queue,(cl_mem)"+tnName+"->content["+str(i)+"],CL_TRUE,CL_MAP_WRITE,0,outSize,0,0,0,0);"
print >>fo, "\t\tmemcpy(clTmp,outTable,outSize);"
print >>fo, "\t\tclEnqueueUnmapMemObject(context.queue,(cl_mem)"+tnName+"->content["+str(i)+"],clTmp,0,0,0);"
print >>fo, "\t\tmunmap(outTable,outSize);"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
print >>fo, "\t\tclose(outFd);"
tupleSize += ";\n"
print >>fo, "\t\t" + tnName + "->tupleSize = " + tupleSize
print >>fo, "\t\t"+tnName+"->tupleNum = header.tupleNum;"
if tn.where_condition is not None:
whereList = []
relList = []
conList = []
get_where_attr(tn.where_condition.where_condition_exp, whereList, relList, conList)
newWhereList = []
whereLen = count_whereList(whereList, newWhereList)
nested = count_whereNested(tn.where_condition.where_condition_exp)
if nested != 0:
print "Not supported yet: the where expression is too complicated"
print 1/0
relName = tn.table_name.lower() + "Rel"
print >>fo, "\t\tstruct scanNode " + relName + ";"
print >>fo, "\t\t" + relName + ".tn = " + tnName + ";"
print >>fo, "\t\t" + relName + ".hasWhere = 1;"
print >>fo, "\t\t" + relName + ".whereAttrNum = " + str(whereLen) + ";"
print >>fo, "\t\t" + relName + ".whereIndex = (int *)malloc(sizeof(int)*" + str(len(whereList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + relName + ".whereIndex);"
print >>fo, "\t\t" + relName + ".outputNum = " + str(len(selectList)) + ";"
print >>fo, "\t\t" + relName + ".outputIndex = (int *)malloc(sizeof(int) * " + str(len(selectList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + relName + ".outputIndex);"
for i in range(0,len(selectList)):
colIndex = selectList[i].column_name
outputIndex = indexList.index(colIndex)
print >>fo, "\t\t" + relName + ".outputIndex[" + str(i) + "] = " + str(outputIndex) + ";"
for i in range(0,len(newWhereList)):
colIndex = indexList.index(newWhereList[i].column_name)
print >>fo, "\t\t" + relName + ".whereIndex["+str(i) + "] = " + str(colIndex) + ";"
if keepInGpu ==0:
print >>fo, "\t\t" + relName + ".KeepInGpu = 0;"
else:
print >>fo, "\t\t" + relName + ".keepInGpu = 1;"
print >>fo, "\t\t" + relName + ".filter = (struct whereCondition *)malloc(sizeof(struct whereCondition));"
print >>fo, "\t\tCHECK_POINTER(" + relName + ".filter);"
print >>fo, "\t\t(" + relName + ".filter)->nested = 0;"
print >>fo, "\t\t(" + relName + ".filter)->expNum = " + str(len(whereList)) + ";"
print >>fo, "\t\t(" + relName + ".filter)->exp = (struct whereExp*) malloc(sizeof(struct whereExp) *" + str(len(whereList)) + ");"
print >>fo, "\t\tCHECK_POINTER((" + relName + ".filter)->exp);"
if tn.where_condition.where_condition_exp.func_name in ["AND","OR"]:
print >>fo, "\t\t(" + relName + ".filter)->andOr = " + tn.where_condition.where_condition_exp.func_name + ";"
else:
print >>fo, "\t\t(" + relName + ".filter)->andOr = EXP;"
for i in range(0,len(whereList)):
colIndex = -1
for j in range(0,len(newWhereList)):
if newWhereList[j].compare(whereList[i]) is True:
colIndex = j
break
if colIndex <0:
print 1/0
print >>fo, "\t\t(" + relName + ".filter)->exp[" + str(i) + "].index = " + str(colIndex) + ";"
print >>fo, "\t\t(" + relName + ".filter)->exp[" + str(i) + "].relation = " + relList[i] + ";"
colType = whereList[i].column_type
ctype = to_ctype(colType)
if ctype == "INT":
print >>fo, "\t\t{"
print >>fo, "\t\t\tint tmp = " + conList[i] + ";"
print >>fo, "\t\t\tmemcpy((" + relName + ".filter)->exp[" + str(i) + "].content, &tmp,sizeof(int));"
print >>fo, "\t\t}"
elif ctype == "FLOAT":
print >>fo, "\t\t{"
print >>fo, "\t\t\tfloat tmp = " + conList[i] + ";"
print >>fo, "\t\t\tmemcpy((" + relName + ".filter)->exp[" + str(i) + "].content, &tmp,sizeof(float));"
print >>fo, "\t\t}"
else:
print >>fo, "\t\tstrcpy((" + relName + ".filter)->exp[" + str(i) + "].content," + conList[i] + ");\n"
if CODETYPE == 0:
print >>fo, "\t\tstruct tableNode *tmp = tableScan(&" + relName + ", &pp);"
else:
print >>fo, "\t\tstruct tableNode *tmp = tableScan(&" + relName + ", &context,&pp);"
print >>fo, "\t\tif(blockTotal !=1){"
if CODETYPE == 0:
print >>fo, "\t\t\tmergeIntoTable(" + resName + ",tmp,&pp);"
else:
print >>fo, "\t\t\tmergeIntoTable(" + resName + ",tmp, &context,&pp);"
print >>fo, "\t\t}else{"
print >>fo, "\t\t\tfree(" + resName + ");"
print >>fo, "\t\t\t" + resName + " = tmp;"
print >>fo, "\t\t}"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\tfreeScan(&" + relName + ");\n"
if CODETYPE == 1:
print >>fo, "\t\tclFinish(context.queue);"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
############## end of wherecondition not none
else:
print >>fo, "\t\tif(blockTotal != 1){"
if CODETYPE == 0:
print >>fo, "\t\t\tmergeIntoTable(" + resName + "," + tnName +",&pp);"
else:
print >>fo, "\t\t\tmergeIntoTable(" + resName + "," + tnName +",&context,&pp);"
print >>fo, "\t\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\t\tfreeTable(" + tnName + ");"
if CODETYPE == 1:
print >>fo, "\t\t\tclFinish(context.queue);"
print >>fo, "\t\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
print >>fo, "\t\t}else{"
print >>fo, "\t\t\tfree(" + resName + ");"
print >>fo, "\t\t\t" + resName + " = " + tnName + ";"
print >>fo, "\t\t}"
print >>fo, "\t\ttupleOffset += header.tupleNum;\n"
print >>fo, "\t}\n"
if joinType == 0:
"""
Generate the codes for star schema joins.
0 represents normal hash join and 1 represents invisible join.
"""
selectOnly = len(joinAttr.dimTables) == 0
hasWhere = 0
factName = joinAttr.factTables[0].table_name.lower() + "Table"
resName = joinAttr.factTables[0].table_name.lower() + "Res"
setTupleNum = 0
selectList = joinAttr.factTables[0].select_list.tmp_exp_list
indexList = []
colList = []
generate_col_list(joinAttr.factTables[0],indexList,colList)
totalAttr = len(indexList)
for i in range(0,totalAttr):
col = colList[i]
if isinstance(col, ystree.YRawColExp):
colType = col.column_type
colIndex = col.column_name
ctype = to_ctype(colType)
colLen = type_length(joinAttr.factTables[0].table_name, colIndex, colType)
elif isinstance(col, ystree.YConsExp):
colType = col.cons_type
ctype = to_ctype(colType)
if cons_type == "INTEGER":
colLen = "sizeof(int)"
elif cons_type == "FLOAT":
colLen = "sizeof(float)"
else:
colLen = str(len(col.cons_value))
elif isinstance(col, ystree.YFuncExp):
print 1/0
if setTupleNum == 0:
setTupleNum = 1
print >>fo, "\toutFd = open(\"" + joinAttr.factTables[0].table_name + str(colIndex) + "\",O_RDONLY);"
print >>fo, "\tread(outFd, &header, sizeof(struct columnHeader));"
print >>fo, "\tblockTotal = header.blockTotal;"
print >>fo, "\tclose(outFd);"
break
print >>fo, "\toffset = 0;"
print >>fo, "\tlong blockSize["+str(totalAttr) + "];"
print >>fo, "\tfor(int i=0;i<" + str(totalAttr) + ";i++)"
print >>fo, "\t\tblockSize[i] = 0;"
print >>fo, "\tfor(int i=0;i<blockTotal;i++){\n"
print >>fo, "\t\tstruct tableNode *" + factName + " = (struct tableNode*)malloc(sizeof(struct tableNode));"
print >>fo, "\t\tCHECK_POINTER(" + factName + ");"
print >>fo, "\t\t" + factName + "->totalAttr = " + str(totalAttr) + ";"
print >>fo, "\t\t" + factName + "->attrType = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->attrType);"
print >>fo, "\t\t" + factName + "->attrSize = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->attrSize);"
print >>fo, "\t\t" + factName + "->attrIndex = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->attrIndex);"
print >>fo, "\t\t" + factName + "->attrTotalSize = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->attrTotalSize);"
print >>fo, "\t\t" + factName + "->dataPos = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->dataPos);"
print >>fo, "\t\t" + factName + "->dataFormat = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->dataFormat);"
print >>fo, "\t\t" + factName + "->content = (char **) malloc(sizeof(char *)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->content);"
tupleSize = "0"
for i in range(0,totalAttr):
col = colList[i]
if isinstance(col, ystree.YRawColExp):
colType = col.column_type
colIndex = col.column_name
ctype = to_ctype(colType)
colLen = type_length(joinAttr.factTables[0].table_name, colIndex, colType)
elif isinstance(col, ystree.YConsExp):
colType = col.cons_type
ctype = to_ctype(colType)
if cons_type == "INTEGER":
colLen = "sizeof(int)"
elif cons_type == "FLOAT":
colLen = "sizeof(float)"
else:
colLen = str(len(col.cons_value))
elif isinstance(col, ystree.YFuncExp):
print 1/0
tupleSize += " + " + colLen
print >>fo, "\t\t" + factName + "->attrType[" + str(i) + "] = " + ctype + ";"
print >>fo, "\t\t" + factName + "->attrSize[" + str(i) + "] = " + colLen + ";"
print >>fo, "\t\t" + factName + "->attrIndex[" + str(i) + "] = " + str(colIndex) + ";"
if POS == 0:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = MEM;"
elif POS == 1:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = PINNED;"
elif POS == 2:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = UVA;"
elif POS == 3:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = MMAP;"
elif POS == 4:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = GPU;"
else:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = MEM;"
print >>fo, "\t\toutFd = open(\"" + joinAttr.factTables[0].table_name + str(colIndex) + "\", O_RDONLY);"
print >>fo, "\t\toffset = i*sizeof(struct columnHeader) + blockSize[" + str(i) + "];"
print >>fo, "\t\tlseek(outFd,offset,SEEK_SET);"
print >>fo, "\t\tread(outFd, &header, sizeof(struct columnHeader));"
print >>fo, "\t\tblockSize[" + str(i) + "] += header.blockSize;"
print >>fo, "\t\toffset += sizeof(struct columnHeader);"
print >>fo, "\t\t" + factName + "->dataFormat[" + str(i) + "] = header.format;"
print >>fo, "\t\toutSize = header.blockSize;"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\toutTable = (char *)mmap(0,outSize,PROT_READ,MAP_SHARED,outFd,offset);"
if CODETYPE == 0:
if POS == 0:
print >>fo, "\t\t" + factName + "->content[" + str(i) + "] = (char *)malloc(outSize);\n"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->content[" + str(i) + "]);"
print >>fo, "\t\tmemcpy("+factName+"->content["+str(i)+"],outTable,outSize);"
elif POS == 1:
print >>fo, "\t\tCUDA_SAFE_CALL_NO_SYNC(cudaMallocHost((void**)&"+factName+"->content["+str(i)+"],outSize));"
print >>fo, "\t\tmemcpy("+factName+"->content["+str(i)+"],outTable,outSize);"
elif POS == 2:
print >>fo, "\t\tCUDA_SAFE_CALL_NO_SYNC(cudaMallocHost((void**)&"+factName+"->content["+str(i)+"],outSize));"
print >>fo, "\t\tmemcpy("+factName+"->content["+str(i)+"],outTable,outSize);"
elif POS == 3:
print >>fo, "\t\t"+factName+"->content["+str(i)+"] = (char *)mmap(0,outSize,PROT_READ,MAP_SHARED,outFd,offset);"
elif POS == 4:
print >>fo, "\t\tCUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&"+factName+"->content["+str(i)+"],outSize));"
print >>fo, "\t\tcudaMemcpy("+factName+"->content["+str(i)+"],outTable,outSize, cudaMemcpyHostToDevice);"
else:
print >>fo, "\t\t" + factName + "->content[" + str(i) + "] = (char*)memalign(256,outSize);\n"
print >>fo, "\t\tmemcpy("+factName+"->content["+str(i)+"],outTable,outSize);"
else:
if POS == 0:
print >>fo, "\t\t"+factName+"->content["+str(i)+"] = (char *)memalign(256,outSize);"
print >>fo, "\t\tmemcpy("+factName+"->content["+str(i)+"],outTable,outSize);"
elif POS == 3:
print >>fo, "\t\t"+factName+"->content["+str(i)+"] = (char *)mmap(0,outSize,PROT_READ,MAP_SHARED,outFd,offset);"
else:
print >>fo, "\t\t"+factName+"->content["+str(i)+"] = (char *)clCreateBuffer(context.context,CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR,outSize,NULL,0);"
print >>fo, "\t\tclTmp = clEnqueueMapBuffer(context.queue,(cl_mem)"+factName+"->content["+str(i)+"],CL_TRUE,CL_MAP_WRITE,0,outSize,0,0,0,0);"
print >>fo, "\t\tmemcpy(clTmp,outTable,outSize);"
print >>fo, "\t\tclEnqueueUnmapMemObject(context.queue,(cl_mem)"+factName+"->content["+str(i)+"],clTmp,0,0,0);"
print >>fo, "\t\tmunmap(outTable,outSize);"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
print >>fo, "\t\tclose(outFd);"
print >>fo, "\t\t" + factName + "->attrTotalSize[" + str(i) + "] = outSize;"
tupleSize += ";\n"
print >>fo, "\t\t" + factName + "->tupleSize = " + tupleSize
print >>fo, "\t\t" + factName + "->tupleNum = header.tupleNum;"
################# end of reading the needed attributes of fact table from disk ###############
if joinAttr.factTables[0].where_condition is not None:
hasWhere = 1
whereExp = joinAttr.factTables[0].where_condition.where_condition_exp
whereList = []
relList = []
conList = []
get_where_attr(whereExp,whereList,relList,conList)
newWhereList = []
whereLen = count_whereList(whereList, newWhereList)
nested = count_whereNested(whereExp)
if nested !=0:
print "Not supported yet: the where expression is too complicated"
print 1/0
relName = joinAttr.factTables[0].table_name.lower() + "Rel"
print >>fo, "\t\tstruct scanNode " + relName + ";"
print >>fo, "\t\t" + relName + ".tn = " + factName + ";"
print >>fo, "\t\t" + relName + ".hasWhere = 1;"
print >>fo, "\t\t" + relName + ".whereAttrNum = " + str(whereLen) + ";"
print >>fo, "\t\t" + relName + ".outputNum = " + str(len(selectList)) + ";"
print >>fo, "\t\t" + relName + ".whereIndex = (int *)malloc(sizeof(int)*" + str(whereLen) + ");"
print >>fo, "\t\tCHECK_POINTER(" + relName + ".whereIndex);"
print >>fo, "\t\t" + relName + ".outputIndex = (int *)malloc(sizeof(int)*" + str(len(selectList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + relName + ".outputIndex);"
for i in range(0,len(newWhereList)):
colIndex = indexList.index(newWhereList[i].column_name)
print >>fo, "\t\t" + relName + ".whereIndex["+str(i) + "] = " + str(colIndex) + ";"
for i in range(0,len(selectList)):
colIndex = selectList[i].column_name
outputIndex = indexList.index(colIndex)
print >>fo, "\t\t" + relName + ".outputIndex[" + str(i) + " ] = " + str(outputIndex) + ";"
if keepInGpu == 0:
print >>fo, "\t\t" + relName + ".keepInGpu = 0;"
else:
print >>fo, "\t\t" + relName + ".keepInGpu = 1;"
print >>fo, "\t\t" + relName + ".filter = (struct whereCondition *)malloc(sizeof(struct whereCondition));"
print >>fo, "\t\tCHECK_POINTER(" + relName + ".filter);"
print >>fo, "\t\t(" + relName + ".filter)->nested = 0;"
print >>fo, "\t\t(" + relName + ".filter)->expNum = " + str(len(whereList)) + ";"
print >>fo, "\t\t(" + relName + ".filter)->exp = (struct whereExp*) malloc(sizeof(struct whereExp) *" + str(len(whereList)) + ");"
print >>fo, "\t\tCHECK_POINTER((" + relName + ".filter)->exp);"
if joinAttr.factTables[0].where_condition.where_condition_exp.func_name in ["AND","OR"]:
print >>fo, "\t\t(" + relName + ".filter)->andOr = " + joinAttr.factTables[0].where_condition.where_condition_exp.func_name + ";"
else:
print >>fo, "\t\t(" + relName + ".filter)->andOr = EXP;"
for i in range(0,len(whereList)):
colIndex = -1
for j in range(0,len(newWhereList)):
if newWhereList[j].compare(whereList[i]) is True:
colIndex = j
break
if colIndex <0:
print 1/0
print >>fo, "\t\t(" + relName + ".filter)->exp[" + str(i) + "].index = " + str(colIndex) + ";"
print >>fo, "\t\t(" + relName + ".filter)->exp[" + str(i) + "].relation = " + relList[i] + ";"
colType = whereList[i].column_type
ctype = to_ctype(colType)
if ctype == "INT":
print >>fo, "\t\t{"
print >>fo, "\t\t\tint tmp = " + conList[i] + ";"
print >>fo, "\t\t\tmemcpy((" + relName + ".filter)->exp[" + str(i) + "].content, &tmp,sizeof(int));"
print >>fo, "\t\t}"
elif ctype == "FLOAT":
print >>fo, "\t\t{"
print >>fo, "\t\t\tfloat tmp = " + conList[i] + ";"
print >>fo, "\t\t\tmemcpy((" + relName + ".filter)->exp[" + str(i) + "].content, &tmp,sizeof(float));"
print >>fo, "\t\t}"
print 1/0
else:
print >>fo, "\t\tstrcpy((" + relName + ".filter)->exp[" + str(i) + "].content," + conList[i] + ");\n"
if CODETYPE == 0:
print >>fo, "\t\tstruct tableNode * " + resName + " = tableScan(&" + relName + ", &pp);"
else:
print >>fo, "\t\tstruct tableNode * " + resName + " = tableScan(&" + relName + ", &context,&pp);"
if selectOnly == 0:
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\tfreeScan(&" + relName + ");\n"
if CODETYPE == 1:
print >>fo, "\t\tclFinish(context.queue);"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
else:
hasWhere = 0
print >>fo, "\t\tstruct tableNode * " + resName + " = " + factName + ";"
factName = resName
for i in range(0,len(joinAttr.dimTables)):
jName = "jNode" + str(i)
dimName = joinAttr.dimTables[i].table_name.lower() + "Res"
print >>fo, "\t\tstruct joinNode " + jName + ";"
print >>fo, "\t\t" + jName + ".leftTable = " + factName + ";"
print >>fo, "\t\t" + jName + ".rightTable = " + dimName + ";"
lOutList = joinAttr.outIndex[i][0]
rOutList = joinAttr.outIndex[i][1]
lPosList = joinAttr.outPos[i][0]
rPosList = joinAttr.outPos[i][1]
lAttrList = joinAttr.outAttr[i][0]
rAttrList = joinAttr.outAttr[i][1]
print >>fo, "\t\t" + jName + ".totalAttr = " + str(len(rOutList) + len(lOutList)) + ";"
print >>fo, "\t\t" + jName + ".keepInGpu = (int *) malloc(sizeof(int) * " + str(len(rOutList) + len(lOutList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + jName + ".keepInGpu);"
if keepInGpu == 0:
print >>fo, "\t\tfor(int k=0;k<" + str(len(rOutList) + len(lOutList)) + ";k++)"
print >>fo, "\t\t\t" + jName + ".keepInGpu[k] = 0;"
else:
print >>fo, "\t\tfor(int k=0;k<" + str(len(rOutList) + len(lOutList)) + ";k++)"
print >>fo, "\t\t\t" + jName + ".keepInGpu[k] = 1;"
print >>fo, "\t\t" + jName + ".rightOutputAttrNum = " + str(len(rOutList)) + ";"
print >>fo, "\t\t" + jName + ".leftOutputAttrNum = " + str(len(lOutList)) + ";"
print >>fo, "\t\t" + jName + ".leftOutputAttrType = (int *)malloc(sizeof(int)*" + str(len(lOutList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + jName + ".leftOutputAttrType);"
print >>fo, "\t\t" + jName + ".leftOutputIndex = (int *)malloc(sizeof(int)*" + str(len(lOutList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + jName + ".leftOutputIndex);"
print >>fo, "\t\t" + jName + ".leftPos = (int *)malloc(sizeof(int)*" + str(len(lOutList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + jName + ".leftPos);"
print >>fo, "\t\t" + jName + ".tupleSize = 0;"
for j in range(0,len(lOutList)):
ctype = to_ctype(lAttrList[j].type)
print >>fo, "\t\t" + jName + ".leftOutputIndex[" + str(j) + "] = " + str(lOutList[j]) + ";"
print >>fo, "\t\t" + jName + ".leftOutputAttrType[" + str(j) + "] = " + ctype + ";"
print >>fo, "\t\t" + jName + ".leftPos[" + str(j) + "] = " + str(lPosList[j]) + ";"
print >>fo, "\t\t" + jName + ".tupleSize += " + factName + "->attrSize[" + str(lOutList[j]) + "];"
print >>fo, "\t\t" + jName + ".rightOutputAttrType = (int *)malloc(sizeof(int)*" + str(len(rOutList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + jName + ".rightOutputAttrType);"
print >>fo, "\t\t" + jName + ".rightOutputIndex = (int *)malloc(sizeof(int)*" + str(len(rOutList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + jName + ".rightOutputIndex);"
print >>fo, "\t\t" + jName + ".rightPos = (int *)malloc(sizeof(int)*" + str(len(rOutList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + jName + ".rightPos);"
for j in range(0,len(rOutList)):
ctype = to_ctype(rAttrList[j].type)
print >>fo, "\t\t" + jName + ".rightOutputIndex[" + str(j) + "] = " + str(rOutList[j]) + ";"
print >>fo, "\t\t" + jName + ".rightOutputAttrType[" + str(j) + "] = " + ctype + ";"
print >>fo, "\t\t" + jName + ".rightPos[" + str(j) + "] = " + str(rPosList[j]) + ";"
print >>fo, "\t\t" + jName + ".tupleSize += " + dimName + "->attrSize[" + str(rOutList[j]) + "];"
print >>fo, "\t\t" + jName + ".rightKeyIndex = " + str(joinAttr.dimIndex[i]) + ";"
print >>fo, "\t\t" + jName + ".leftKeyIndex = " + str(joinAttr.factIndex[i]) + ";"
if CODETYPE == 0:
print >>fo, "\t\tstruct tableNode *join" + str(i) + " = hashJoin(&" + jName + ",&pp);\n"
else:
print >>fo, "\t\tstruct tableNode *join" + str(i) + " = hashJoin(&" + jName + ", &context, &pp);\n"
factName = "join" + str(i)
if selectOnly == 0:
print >>fo, "\t\tif(blockTotal !=1){"
if CODETYPE == 0:
print >>fo, "\t\t\tmergeIntoTable("+resultNode+",join" + str(i) + ", &pp);"
else:
print >>fo, "\t\t\tmergeIntoTable("+resultNode+",join" + str(i) + ", &context, &pp);"
print >>fo, "\t\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
for i in range(0,len(joinAttr.dimTables)):
jName = "join" + str(i)
print >>fo, "\t\t\tfreeTable(" + jName + ");"
if CODETYPE == 1:
print >>fo, "\t\t\tclFinish(context.queue);"
print >>fo, "\t\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
print >>fo, "\t\t}else{"
print >>fo, "\t\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\t\tfreeTable(" +resultNode + ");"
print >>fo, "\t\t\t"+resultNode+" = join" + str(i) + ";"
for i in range(0,len(joinAttr.dimTables)-1):
jName = "join" + str(i)
print >>fo, "\t\t\tfreeTable(" + jName + ");"
if CODETYPE == 1:
print >>fo, "\t\t\tclFinish(context.queue);"
print >>fo, "\t\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
print >>fo, "\t\t}"
else:
print >>fo, "\t\tif(blockTotal !=1){"
if CODETYPE == 0:
print >>fo, "\t\t\tmergeIntoTable("+resultNode+"," + resName + ",&pp);"
else:
print >>fo, "\t\t\tmergeIntoTable("+resultNode+"," + resName + ", &context, &pp);"
print >>fo, "\t\t}else{"
print >>fo, "\t\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\t\tfreeTable(" +resultNode + ");"
print >>fo, "\t\t\t"+resultNode+" = " + resName + ";"
if CODETYPE == 1:
print >>fo, "\t\t\tclFinish(context.queue);"
print >>fo, "\t\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
print >>fo, "\t\t}"
if hasWhere != 0:
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\tfreeScan(&" + relName + ");\n"
if CODETYPE == 1:
print >>fo, "\t\tclFinish(context.queue);"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
print >>fo, "\t}\n"
elif joinType == 1:
"""
Generating codes for invisible join.
"""
factName = joinAttr.factTables[0].table_name.lower() + "Table"
resName = joinAttr.factTables[0].table_name.lower() + "Res"
selectOnly = len(joinAttr.dimTables) == 0
hasWhere = 0
setTupleNum = 0
selectList = joinAttr.factTables[0].select_list.tmp_exp_list
indexList = []
colList = []
generate_col_list(joinAttr.factTables[0],indexList,colList)
totalAttr = len(indexList)
for i in range(0,totalAttr):
col = colList[i]
if isinstance(col, ystree.YRawColExp):
colType = col.column_type
colIndex = col.column_name
ctype = to_ctype(colType)
colLen = type_length(joinAttr.factTables[0].table_name, colIndex, colType)
elif isinstance(col, ystree.YConsExp):
colType = col.cons_type
ctype = to_ctype(colType)
if cons_type == "INTEGER":
colLen = "sizeof(int)"
elif cons_type == "FLOAT":
colLen = "sizeof(float)"
else:
colLen = str(len(col.cons_value))
elif isinstance(col, ystree.YFuncExp):
print 1/0
if setTupleNum == 0:
setTupleNum = 1
print >>fo, "\toutFd = open(\"" + joinAttr.factTables[0].table_name + str(colIndex) + "\",O_RDONLY);"
print >>fo, "\tread(outFd, &header, sizeof(struct columnHeader));"
print >>fo, "\tblockTotal = header.blockTotal;"
print >>fo, "\tclose(outFd);"
factIndex = []
factInputList = joinAttr.factTables[0].select_list.tmp_exp_list
dimNum = len(joinAttr.dimTables)
outputList = joinAttr.joinNode[dimNum-1].select_list.tmp_exp_list
outputNum = len(outputList)
jName = "jNode"
print >>fo, "\tstruct joinNode " + jName + ";"
print >>fo, "\t" + jName + ".dimNum = " + str(dimNum) + ";"
print >>fo, "\t" + jName + ".dimTable = (struct tableNode **) malloc(sizeof(struct tableNode) * " + jName + ".dimNum);"
print >>fo, "\tCHECK_POINTER(" + jName + ".dimTable);"
print >>fo, "\t" + jName + ".factIndex = (int *) malloc(sizeof(int) * " + jName + ".dimNum);"
print >>fo, "\tCHECK_POINTER(" + jName + ".factIndex);"
print >>fo, "\t" + jName + ".dimIndex = (int *) malloc(sizeof(int) * " + jName + ".dimNum);\n"
print >>fo, "\tCHECK_POINTER(" + jName + ".dimIndex);"
for i in joinAttr.factIndex:
for j in range(0, len(factInputList)):
if i == factInputList[j].column_name:
break
factIndex.append(j)
for i in range(0, dimNum):
print >>fo, "\t" + jName + ".dimIndex[" + str(i) + "] = " + str(joinAttr.dimIndex[i]) + ";"
print >>fo, "\t" + jName + ".factIndex[" + str(i) + "] = " + str(factIndex[i]) + ";"
dimName = joinAttr.dimTables[i].table_name.lower() + "Res"
print >>fo, "\t" + jName + ".dimTable["+str(i) + "] = " + dimName + ";\n"
print >>fo, "\t" + jName + ".totalAttr = " + str(outputNum) + ";"
print >>fo, "\t" + jName + ".keepInGpu = (int *) malloc(sizeof(int) * " + str(outputNum) + ");"
print >>fo, "\tCHECK_POINTER(" + jName + ".keepInGpu);"
if keepInGpu == 0:
print >>fo, "\tfor(int k=0;k<" + str(outputNum) + ";k++)"
print >>fo, "\t\t" + jName + ".keepInGpu[k] = 0;\n"
else:
print >>fo, "\tfor(int k=0;k<" + str(outputNum) + ";k++)"
print >>fo, "\t\t" + jName + ".keepInGpu[k] = 1;\n"
print >>fo, "\t" + jName +".attrType = (int *) (malloc(sizeof(int) * "+ jName + ".totalAttr));"
print >>fo, "\tCHECK_POINTER(" + jName + ".attrType);"
print >>fo, "\t" + jName +".attrSize = (int *) (malloc(sizeof(int) * "+ jName + ".totalAttr));"
print >>fo, "\tCHECK_POINTER(" + jName + ".attrSize);"
tupleSize = "0"
for i in range(0, outputNum):
colType = outputList[i].column_type
ctype = to_ctype(colType)
newExp = ystree.__trace_to_leaf__(joinAttr.joinNode[dimNum-1], outputList[i], False)
colLen = type_length(newExp.table_name,newExp.column_name,colType)
tupleSize = tupleSize + "+" + colLen
print >>fo, "\t" + jName + ".attrType[" + str(i) + "] = " + ctype + ";"
print >>fo, "\t" + jName + ".attrSize[" + str(i) + "] = " + str(colLen) + ";"
print >>fo, "\t" + jName + ".tupleSize = " + tupleSize + ";\n"
factOutputNum = 0
factOutputIndex = []
dimOutputExp = []
factOutputPos = []
dimPos = []
for i in range(0, outputNum):
newExp = ystree.__trace_to_leaf__(joinAttr.joinNode[dimNum-1], outputList[i], False)
if newExp.table_name == joinAttr.factTables[0].table_name:
factOutputNum +=1
for j in range(0, len(factInputList)):
if newExp.column_name == factInputList[j].column_name:
break
factOutputIndex.append(j)
factOutputPos.append(i)
else:
dimOutputExp.append(newExp)
dimPos.append(i)
print >>fo, "\t" + jName + ".factOutputNum = " + str(factOutputNum) + ";"
print >>fo, "\t" + jName + ".factOutputIndex = (int *) malloc(" + jName + ".factOutputNum * sizeof(int));"
print >>fo, "\tCHECK_POINTER(" + jName + ".factOutputIndex);"
print >>fo, "\t" + jName + ".factOutputPos = (int *) malloc(" + jName + ".factOutputNum * sizeof(int));"
print >>fo, "\tCHECK_POINTER(" + jName + ".factOutputpos);"
for i in range(0, factOutputNum):
print >>fo, "\t" + jName + ".factOutputIndex[" + str(i) + "] = " + str(factOutputIndex[i]) + ";"
print >>fo, "\t" + jName + ".factOutputPos[" + str(i) + "] = " + str(factOutputPos[i]) + ";"
dimOutputTotal = outputNum - factOutputNum
print >>fo, "\t" + jName + ".dimOutputTotal = " + str(dimOutputTotal) + ";"
print >>fo, "\t" + jName + ".dimOutputNum = (int *) malloc( sizeof(int) * " + jName + ".dimNum);"
print >>fo, "\tCHECK_POINTER(" + jName + ".dimOutputNum);"
print >>fo, "\t" + jName + ".dimOutputIndex = (int **) malloc( sizeof(int*) * " + jName + ".dimNum);"
print >>fo, "\tCHECK_POINTER(" + jName + ".dimOutputIndex);"
print >>fo, "\t" + jName + ".dimOutputPos = (int *) malloc( sizeof(int) * " + jName + ".dimOutputTotal);"
print >>fo, "\tCHECK_POINTER(" + jName + ".dimOutputPos);"
dimOutputPos = []
for i in range(0, len(joinAttr.dimTables)):
dimOutputNum = len(joinAttr.outIndex[i][1])
print >>fo, "\t" + jName + ".dimOutputNum[" + str(i) + "] = " + str(dimOutputNum) + ";"
if dimOutputNum >0:
print >>fo, "\t" + jName + ".dimOutputIndex[" + str(i) + "] = (int *) malloc(sizeof(int) *" +str(dimOutputNum) + ");"
print >>fo, "\tCHECK_POINTER(" + jName + ".dimOutputIndex);"
dimTableName = joinAttr.dimTables[i].table_name
dimExp = []
for exp in dimOutputExp:
if exp.table_name == dimTableName:
dimExp.append(exp)
pos = dimPos[dimOutputExp.index(exp)]
dimOutputPos.append(pos)
for exp in dimExp:
tmpList = joinAttr.dimTables[i].select_list.tmp_exp_list
for j in range(0, len(tmpList)):
if tmpList[j].column_name == exp.column_name:
print >>fo, "\t" + jName + ".dimOutputIndex[" + str(i) + "][" + str(dimExp.index(exp)) + "] = " + str(j) + ";"
break
for i in range(0, dimOutputTotal):
print >>fo, "\t" + jName + ".dimOutputPos[" + str(i) + "] = " + str(dimOutputPos[i]) + ";"
totalAttr = len(colList)
print >>fo, "\tlong blockSize["+str(totalAttr) + "];"
print >>fo, "\tfor(int i=0;i<" + str(totalAttr) + ";i++)"
print >>fo, "\t\tblockSize[i] = 0;"
print >>fo, "\toffset = 0;\n"
print >>fo, "\tfor(int i=0;i<blockTotal;i++){\n"
print >>fo, "\t\tstruct tableNode *" + factName + " = (struct tableNode*)malloc(sizeof(struct tableNode));"
print >>fo, "\t\tCHECK_POINTER(" + factName + ");"
print >>fo, "\t\t" + factName + "->totalAttr = " + str(totalAttr) + ";"
print >>fo, "\t\t" + factName + "->attrType = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->attrType);"
print >>fo, "\t\t" + factName + "->attrSize = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->attrSize);"
print >>fo, "\t\t" + factName + "->attrIndex = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->attrIndex);"
print >>fo, "\t\t" + factName + "->attrTotalSize = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->attrTotalSize);"
print >>fo, "\t\t" + factName + "->dataPos = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->dataPos);"
print >>fo, "\t\t" + factName + "->dataFormat = (int *) malloc(sizeof(int)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->dataFormat);"
print >>fo, "\t\t" + factName + "->content = (char **) malloc(sizeof(char *)*" + str(totalAttr) + ");"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->content);"
tupleSize = "0"
for i in range(0,totalAttr):
col = colList[i]
if isinstance(col, ystree.YRawColExp):
colType = col.column_type
colIndex = col.column_name
ctype = to_ctype(colType)
colLen = type_length(joinAttr.factTables[0].table_name, colIndex, colType)
elif isinstance(col, ystree.YConsExp):
colType = col.cons_type
ctype = to_ctype(colType)
if cons_type == "INTEGER":
colLen = "sizeof(int)"
elif cons_type == "FLOAT":
colLen = "sizeof(float)"
else:
colLen = str(len(col.cons_value))
elif isinstance(col, ystree.YFuncExp):
print 1/0
tupleSize += " + " + colLen
print >>fo, "\t\t" + factName + "->attrType[" + str(i) + "] = " + ctype + ";"
print >>fo, "\t\t" + factName + "->attrSize[" + str(i) + "] = " + colLen + ";"
print >>fo, "\t\t" + factName + "->attrIndex[" + str(i) + "] = " + str(colIndex) + ";"
if POS == 0:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = MEM;"
elif POS == 1:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = PINNED;"
elif POS == 2:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = UVA;"
elif POS == 3:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = MMAP;"
elif POS == 4:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = GPU;"
else:
print >>fo, "\t\t" + factName + "->dataPos[" + str(i) + "] = MEM;"
tupleSize += ";\n"
print >>fo, "\t\t" + factName + "->tupleSize = " + tupleSize
for i in range(0,totalAttr):
col = colList[i]
colType = col.column_type
colIndex = col.column_name
colLen = type_length(joinAttr.factTables[0].table_name, colIndex, colType)
print >>fo, "\t\toutFd = open(\"" + joinAttr.factTables[0].table_name + str(colIndex) + "\", O_RDONLY);"
print >>fo, "\t\toffset = i*sizeof(struct columnHeader) + blockSize["+str(i)+"];"
print >>fo, "\t\tlseek(outFd,offset,SEEK_SET);"
print >>fo, "\t\tread(outFd, &header, sizeof(struct columnHeader));"
print >>fo, "\t\tblockSize[" + str(i) + "] += header.blockSize;"
print >>fo, "\t\toffset += sizeof(struct columnHeader);"
print >>fo, "\t\t" + factName + "->dataFormat[" + str(i) + "] = header.format;"
print >>fo, "\t\toutSize = header.blockSize;"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\toutTable = (char *)mmap(0,outSize,PROT_READ,MAP_SHARED,outFd,offset);"
if CODETYPE == 0:
if POS == 0:
print >>fo, "\t\t" + factName + "->content[" + str(i) + "] = (char*)malloc(outSize);\n"
print >>fo, "\t\tCHECK_POINTER(" + factName + "->content[" + str(i) + "];"
elif POS == 1:
print >>fo, "\t\tCUDA_SAFE_CALL_NO_SYNC(cudaMallocHost((void**)&"+factName+"->content["+str(i)+"],outSize));"
elif POS == 2:
print >>fo, "\t\tCUDA_SAFE_CALL_NO_SYNC(cudaMallocHost((void**)&"+factName+"->content["+str(i)+"],outSize));"
elif POS == 3:
print >>fo, "\t\t"+factName+"->content["+str(i)+"] = (char *)mmap(0,outSize,PROT_READ,MAP_SHARED,outFd,offset);"
elif POS == 4:
print >>fo, "\t\tCUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&"+factName+"->content["+str(i)+"],outSize));"
else:
print >>fo, "\t\t" + factName + "->content[" + str(i) + "] = (char *)memalign(256,outSize);\n"
print >>fo, "\t\tmemcpy("+factName+"->content["+str(i)+"],outTable,outSize);"
else:
if POS == 0:
print >>fo, "\t\t"+factName+"->content["+str(i)+"] = (char *)memalign(256,outSize);"
print >>fo, "\t\tmemcpy("+factName+"->content["+str(i)+"],outTable,outSize);"
elif POS == 3:
print >>fo, "\t\t"+factName+"->content["+str(i)+"] = (char *)mmap(0,outSize,PROT_READ,MAP_SHARED,outFd,offset);"
else:
print >>fo, "\t\t"+factName+"->content["+str(i)+"] = (char *)clCreateBuffer(context.context,CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR,outSize,NULL,0);"
print >>fo, "\t\tclTmp = clEnqueueMapBuffer(context.queue,(cl_mem)"+factName+"->content["+str(i)+"],CL_TRUE,CL_MAP_WRITE,0,outSize,0,0,0,0);"
print >>fo, "\t\tmemcpy(clTmp,outTable,outSize);"
print >>fo, "\t\tclEnqueueUnmapMemObject(context.queue,(cl_mem)"+factName+"->content["+str(i)+"],clTmp,0,0,0);"
print >>fo, "\t\tmunmap(outTable,outSize);"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
print >>fo, "\t\tclose(outFd);"
print >>fo, "\t\t" + factName + "->attrTotalSize[" + str(i) + "] = outSize;"
print >>fo, "\t\t" + factName + "->tupleNum = header.tupleNum;"
if joinAttr.factTables[0].where_condition is not None:
hasWhere = 1
whereExp = joinAttr.factTables[0].where_condition.where_condition_exp
whereList = []
relList = []
conList = []
get_where_attr(whereExp,whereList,relList,conList)
newWhereList = []
whereLen = count_whereList(whereList, newWhereList)
nested = count_whereNested(whereExp)
if nested !=0:
print "Not supported yet: the where expression is too complicated"
print 1/0
relName = joinAttr.factTables[0].table_name.lower() + "Rel"
print >>fo, "\t\tstruct scanNode " + relName + ";"
print >>fo, "\t\t" + relName + ".tn = " + factName + ";"
print >>fo, "\t\t" + relName + ".hasWhere = 1;"
print >>fo, "\t\t" + relName + ".outputNum = " + str(len(selectList)) + ";"
print >>fo, "\t\t" + relName + ".whereAttrNum = " + str(whereLen) + ";"
print >>fo, "\t\t" + relName + ".whereIndex = (int *)malloc(sizeof(int)*" + str(whereLen) + ");"
print >>fo, "\t\tCHECK_POINTER(" + relName + ".whereIndex);"
print >>fo, "\t\t" + relName + ".outputIndex = (int *)malloc(sizeof(int)*" + str(len(selectList)) + ");"
print >>fo, "\t\tCHECK_POINTER(" + relName + ".outputIndex);"
if keepInGpu == 0:
print >>fo, "\t\t" + relName + ".keepInGpu = 0;"
else:
print >>fo, "\t\t" + relName + ".keepInGpu = 1;"
for i in range(0,len(newWhereList)):
colIndex = int(newWhereList[i].column_name)
print >>fo, "\t\t" + relName + ".whereIndex["+str(i) + "] = " + str(colIndex) + ";"
for i in range(0,len(selectList)):
colIndex = selectList[i].column_name
outputIndex = indexList.index(colIndex)
print >>fo, "\t\t" + relName + ".outputIndex[" + str(i) + " ] = " + str(outputIndex) + ";"
print >>fo, "\t\t" + relName + ".filter = (struct whereCondition *)malloc(sizeof(struct whereCondition));"
print >>fo, "\t\tCHECK_POINTER(" + relName + ".filter);"
print >>fo, "\t\t(" + relName + ".filter)->nested = 0;"
print >>fo, "\t\t(" + relName + ".filter)->expNum = " + str(len(whereList)) + ";"
print >>fo, "\t\t(" + relName + ".filter)->exp = (struct whereExp*) malloc(sizeof(struct whereExp) *" + str(len(whereList)) + ");"
print >>fo, "\t\tCHECK_POINTER((" + relName + ".filter)->exp);"
if joinAttr.factTables[0].where_condition.where_condition_exp.func_name in ["AND","OR"]:
print >>fo, "\t\t(" + relName + ".filter)->andOr = " + joinAttr.factTables[0].where_condition.where_condition_exp.func_name + ";"
else:
print >>fo, "\t\t(" + relName + ".filter)->andOr = EXP;"
for i in range(0,len(whereList)):
colIndex = -1
for j in range(0,len(newWhereList)):
if newWhereList[j].compare(whereList[i]) is True:
colIndex = j
break
if colIndex <0:
print 1/0
print >>fo, "\t\t(" + relName + ".filter)->exp[" + str(i) + "].index = " + str(colIndex) + ";"
print >>fo, "\t\t(" + relName + ".filter)->exp[" + str(i) + "].relation = " + relList[i] + ";"
colType = whereList[i].column_type
ctype = to_ctype(colType)
if ctype == "INT":
print >>fo, "\t\t{"
print >>fo, "\t\t\tint tmp = " + conList[i] + ";"
print >>fo, "\t\t\tmemcpy((" + relName + ".filter)->exp[" + str(i) + "].content, &tmp,sizeof(int));"
print >>fo, "\t\t}"
elif ctype == "FLOAT":
print >>fo, "\t\t{"
print >>fo, "\t\t\tfloat tmp = " + conList[i] + ";"
print >>fo, "\t\t\tmemcpy((" + relName + ".filter)->exp[" + str(i) + "].content, &tmp,sizeof(float));"
print >>fo, "\t\t}"
print 1/0
else:
print >>fo, "\t\tstrcpy((" + relName + ".filter)->exp[" + str(i) + "].content," + conList[i] + ");\n"
if CODETYPE == 0:
print >>fo, "\t\tstruct tableNode * " + resName + " = tableScan(&" + relName + ", &pp);"
else:
print >>fo, "\t\tstruct tableNode * " + resName + " = tableScan(&" + relName + ", &context,&pp);"
if selectOnly == 0:
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\tfreeScan(&" + relName + ");\n"
if CODETYPE == 1:
print >>fo, "\t\tclFinish(context.queue);"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
else:
hasWhere = 0
print >>fo, "\t\tstruct tableNode * " + resName + " = " + factName + ";"
print >>fo, "\t\t" + jName + ".factTable = " + resName + ";"
if CODETYPE == 0:
print >>fo, "\t\tstruct tableNode *join1 = inviJoin(&" + jName + ", &pp);"
else:
print >>fo, "\t\tstruct tableNode *join1 = inviJoin(&" + jName + ", &context,&pp);"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\tfreeTable(" + resName + ");"
if CODETYPE == 1:
print >>fo, "\t\tclFinish(context.queue);"
print >>fo, "\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
print >>fo, "\t\tif(blockTotal !=1){"
if CODETYPE == 0:
print >>fo, "\t\t\tmergeIntoTable(" + resultNode + ",join1,&pp);"
else:
print >>fo, "\t\t\tmergeIntoTable(" + resultNode + ",join1,&context,&pp);"
print >>fo, "\t\t\tclock_gettime(CLOCK_REALTIME,&diskStart);"
print >>fo, "\t\t\tfreeTable(join1);"
if CODETYPE == 1:
print >>fo, "\t\t\tclFinish(context.queue);"
print >>fo, "\t\t\tclock_gettime(CLOCK_REALTIME,&diskEnd);"
print >>fo, "\t\t\tdiskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec;"
print >>fo, "\t\t}else"
print >>fo, "\t\t\t" + resultNode + "=join1;"
print >>fo, "\t}\n"
if len(aggNode) >0 :
"""
Generate codes for aggregation node.
"""
gb_exp_list = aggNode[0].group_by_clause.groupby_exp_list
select_list = aggNode[0].select_list.tmp_exp_list
selectLen = len(select_list)
gbLen = len(gb_exp_list)
print >>fo, "\tstruct groupByNode * gbNode = (struct groupByNode *) malloc(sizeof(struct groupByNode));"
print >>fo, "\tCHECK_POINTER(gbNode);"
print >>fo, "\tgbNode->table = " +resultNode +";"
print >>fo, "\tgbNode->groupByColNum = " + str(gbLen) + ";"
print >>fo, "\tgbNode->groupByIndex = (int *)malloc(sizeof(int) * " + str(gbLen) + ");"
print >>fo, "\tCHECK_POINTER(gbNode->groupByIndex);"
print >>fo, "\tgbNode->groupByType = (int *)malloc(sizeof(int) * " + str(gbLen) + ");"
print >>fo, "\tCHECK_POINTER(gbNode->groupByType);"
print >>fo, "\tgbNode->groupBySize = (int *)malloc(sizeof(int) * " + str(gbLen) + ");"
print >>fo, "\tCHECK_POINTER(gbNode->groupBySize);"
for i in range(0,gbLen):
exp = gb_exp_list[i]
if isinstance(exp, ystree.YRawColExp):
print >>fo, "\tgbNode->groupByIndex[" + str(i) + "] = " + str(exp.column_name) + ";"
print >>fo, "\tgbNode->groupByType[" + str(i) + "] = gbNode->table->attrType[" + str(exp.column_name) + "];"
print >>fo, "\tgbNode->groupBySize[" + str(i) + "] = gbNode->table->attrSize[" + str(exp.column_name) + "];"
elif isinstance(exp, ystree.YConsExp):
print >>fo, "\tgbNode->groupByIndex[" + str(i) + "] = -1;"
print >>fo, "\tgbNode->groupByType[" + str(i) + "] = INT;"
print >>fo, "\tgbNode->groupBySize[" + str(i) + "] = sizeof(int);"
else:
print 1/0
print >>fo, "\tgbNode->outputAttrNum = " + str(selectLen) + ";"
print >>fo, "\tgbNode->attrType = (int *) malloc(sizeof(int) *" + str(selectLen) + ");"
print >>fo, "\tCHECK_POINTER(gbNode->attrType);"
print >>fo, "\tgbNode->attrSize = (int *) malloc(sizeof(int) *" + str(selectLen) + ");"
print >>fo, "\tCHECK_POINTER(gbNode->attrSize);"
print >>fo, "\tgbNode->tupleSize = 0;"
print >>fo, "\tgbNode->gbExp = (struct groupByExp *) malloc(sizeof(struct groupByExp) * " + str(selectLen) + ");"
print >>fo, "\tCHECK_POINTER(gbNode->gbExp);"
for i in range(0,selectLen):
exp = select_list[i]
if isinstance(exp, ystree.YFuncExp):
print >>fo, "\tgbNode->tupleSize += sizeof(float);"
print >>fo, "\tgbNode->attrType[" + str(i) + "] = FLOAT;"
print >>fo, "\tgbNode->attrSize[" + str(i) + "] = sizeof(float);"
print >>fo, "\tgbNode->gbExp["+str(i)+"].func = " + exp.func_name + ";"
para = exp.parameter_list[0]
mathFunc = mathExp()
mathFunc.addOp(para)
prefix = "\tgbNode->gbExp[" + str(i) + "].exp"
printMathFunc(fo,prefix, mathFunc)
elif isinstance(exp, ystree.YRawColExp):
colIndex = exp.column_name
print >>fo, "\tgbNode->attrType[" + str(i) + "] = " + resultNode + "->attrType[" + str(colIndex) + "];"
print >>fo, "\tgbNode->attrSize[" + str(i) + "] = " + resultNode + "->attrSize[" + str(colIndex) + "];"
print >>fo, "\tgbNode->tupleSize += "+resultNode + "->attrSize[" + str(colIndex) + "];"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].func = NOOP;"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].exp.op = NOOP;"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].exp.exp = NULL;"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].exp.opNum = 1;"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].exp.opType = COLUMN;"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].exp.opValue = " + str(exp.column_name) + ";"
else:
if exp.cons_type == "INTEGER":
print >>fo, "\tgbNode->attrType[" + str(i) + "] = INT;"
print >>fo, "\tgbNode->attrSize[" + str(i) + "] = sizeof(int);"
print >>fo, "\tgbNode->tupleSize += sizeof(int);"
elif exp.cons_type == "FLOAT":
print >>fo, "\tgbNode->attrType[" + str(i) + "] = FLOAT;"
print >>fo, "\tgbNode->attrSize[" + str(i) + "] = sizeof(float);"
print >>fo, "\tgbNode->tupleSize += sizeof(float);"
else:
print 1/0
print >>fo, "\tgbNode->gbExp[" + str(i) + "].func = NOOP;"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].exp.op = NOOP;"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].exp.exp = NULL;"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].exp.opNum = 1;"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].exp.opType = CONS;"
print >>fo, "\tgbNode->gbExp[" + str(i) + "].exp.opValue = " + str(exp.cons_value) + ";"
resultNode = "gbResult"
if CODETYPE == 0:
print >>fo, "\tstruct tableNode * " + resultNode + " = groupBy(gbNode, &pp);"
else:
print >>fo, "\tstruct tableNode * " + resultNode + " = groupBy(gbNode, &context,&pp);"
print >>fo, "\tfreeGroupByNode(gbNode);\n"
if len(orderbyNode) > 0 :
"""
Generate codes for order by node.
"""
orderby_exp_list = orderbyNode[0].order_by_clause.orderby_exp_list
odLen = len(orderby_exp_list)
print >>fo, "\tstruct orderByNode * odNode = (struct orderByNode *) malloc(sizeof(struct orderByNode));"
print >>fo, "\tCHECK_POINTER(odNode);"
print >>fo, "\todNode->table = " +resultNode +";"
print >>fo, "\todNode->orderByNum = " + str(odLen) + ";"
print >>fo, "\todNode->orderBySeq = (int *) malloc(sizeof(int) * odNode->orderByNum);"
print >>fo, "\tCHECK_POINTER(odNode->orderBySeq);"
print >>fo, "\todNode->orderByIndex = (int *) malloc(sizeof(int) * odNode->orderByNum);"
print >>fo, "\tCHECK_POINTER(odNode->orderByIndex);"
for i in range(0,odLen):
seq = orderbyNode[0].order_by_clause.order_indicator_list[i]
if seq == "ASC":
print >>fo, "\todNode->orderBySeq[" + str(i) + "] = ASC;"
else:
print >>fo, "\todNode->orderBySeq[" + str(i) + "] = DESC;"
print >>fo, "\todNode->orderByIndex[" + str(i) + "] = " + str(orderby_exp_list[i].column_name) + ";"
resultNode = "odResult"
if CODETYPE == 0:
print >>fo, "\tstruct tableNode * " + resultNode + " = orderBy(odNode,&pp);"
else:
print >>fo, "\tstruct tableNode * " + resultNode + " = orderBy(odNode, &context,&pp);"
print >>fo, "\tfreeOrderByNode(odNode);\n"
print >>fo, "\tstruct materializeNode mn;"
print >>fo, "\tmn.table = "+resultNode + ";"
if CODETYPE == 0:
print >>fo, "\tmaterializeCol(&mn, &pp);"
else:
print >>fo, "\tmaterializeCol(&mn, &context,&pp);"
print >>fo, "\tfreeTable("+resultNode + ");\n"
if CODETYPE == 1:
print >>fo, "\tclReleaseCommandQueue(context.queue);"
print >>fo, "\tclReleaseContext(context.context);"
print >>fo, "\tclReleaseProgram(context.program);\n"
print >>fo, "\tclock_gettime(CLOCK_REALTIME,&end);"
print >>fo, "\tdouble timeE = (end.tv_sec - start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec;"
print >>fo, "\tprintf(\"Disk Load Time: %lf\\n\", diskTotal/(1000*1000));"
print >>fo, "\tprintf(\"PCIe Time: %lf\\n\",pp.pcie);"
print >>fo, "\tprintf(\"Kernel Time: %lf\\n\",pp.kernel);"
print >>fo, "\tprintf(\"Total Time: %lf\\n\", timeE/(1000*1000));"
print >>fo, "}\n"
fo.close()
"""
gpudb_code_gen: entry point for code generation.
"""
def gpudb_code_gen(argv):
    """Entry point for code generation.

    With two real arguments (XML plan + config) generates query code from
    the plan; with one argument only extracts the table schema.  All output
    is laid out under ./src/{include,utility,cuda|opencl}.

    @param argv: command-line style argument list (argv[0] is the program
                 name, argv[1] the XML file, optional argv[2] the config).
    """
    pwd = os.getcwd()
    resultDir = "./src"
    utilityDir = "./utility"
    if CODETYPE == 0:
        codeDir = "./cuda"
    else:
        codeDir = "./opencl"
    includeDir = "./include"

    schemaFile = None
    # Initialize tree_node so the sanity check below can never hit an
    # unbound local if the argument count is unexpected.
    tree_node = None
    # BUGFIX: use the argv parameter consistently (the original checked
    # len(sys.argv) but indexed argv, which breaks any caller that passes
    # a custom argument list instead of sys.argv).
    if len(argv) == 3:
        tree_node = ystree.ysmart_tree_gen(argv[1], argv[2])
    elif len(argv) == 2:
        schemaFile = ystree.ysmart_get_schema(argv[1])

    if len(argv) == 3 and tree_node is None:
        exit(-1)

    # Create the output directory tree: src/, src/<codeDir>, src/include.
    if not os.path.exists(resultDir):
        os.makedirs(resultDir)
    os.chdir(resultDir)
    if not os.path.exists(codeDir):
        os.makedirs(codeDir)
    if not os.path.exists(includeDir):
        os.makedirs(includeDir)

    # Emit the schema header into src/include.
    os.chdir(includeDir)
    generate_schema_file()

    # Emit the data loader (and SOA transform when enabled) into src/utility.
    os.chdir(pwd)
    os.chdir(resultDir)
    os.chdir(utilityDir)
    generate_loader()
    if SOA == 1:
        generate_soa()

    # Emit the generated query driver into src/<codeDir>.
    os.chdir(pwd)
    os.chdir(resultDir)
    os.chdir(codeDir)
    if len(argv) == 3:
        generate_code(tree_node)

    # Persist the extracted schema (schema-only mode) for later runs.
    os.chdir(pwd)
    os.chdir(resultDir)
    os.chdir(utilityDir)
    if schemaFile is not None:
        # Context manager guarantees the metadata file is closed even if
        # pickling fails.
        with open(".metadata", 'wb') as metaFile:
            pickle.dump(schemaFile, metaFile)

    os.chdir(pwd)
| {
"content_hash": "8f76ad4ef2841428c7dc18b23b60aad5",
"timestamp": "",
"source": "github",
"line_count": 2223,
"max_line_length": 168,
"avg_line_length": 45.15969410706253,
"alnum_prop": 0.5127801573861939,
"repo_name": "anilshanbhag/GPUDB",
"id": "45b6d9f83b69ff0839fb2d778590f7fee67c862f",
"size": "100390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "XML2CODE/code_gen.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "363987"
},
{
"name": "C++",
"bytes": "162466"
},
{
"name": "Cuda",
"bytes": "192394"
},
{
"name": "GAP",
"bytes": "49795"
},
{
"name": "HTML",
"bytes": "25396"
},
{
"name": "Makefile",
"bytes": "5887"
},
{
"name": "Objective-C",
"bytes": "7584"
},
{
"name": "Python",
"bytes": "3097808"
},
{
"name": "TeX",
"bytes": "8620"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
from sample.str_process import classLv01
from sample.process.handler import Handler
class BasicTestSuite(unittest.TestCase):
    """Tests for the sample string processors.

    Both processors implement the same contract: input starting with a
    lowercase letter is lowercased entirely, otherwise it is uppercased.
    """

    def test_ClassLv01(self):
        """Level-1 module function: classLv01.process_str."""
        # Use a dedicated name instead of shadowing the builtin ``str``.
        result = classLv01.process_str("a Simple Test")
        self.assertEqual(result, "a simple test")
        result = classLv01.process_str("A simple test")
        self.assertEqual(result, "A SIMPLE TEST")

    def test_Handler(self):
        """Level-2 class method: Handler.process_string."""
        result = Handler.process_string("a Simple Test")
        self.assertEqual(result, "a simple test")
        result = Handler.process_string("A simple test")
        self.assertEqual(result, "A SIMPLE TEST")
# Allow running this test module directly: ``python test_sample.py``.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "5b557a66349c00bd4d8314924e4f7e7e",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 82,
"avg_line_length": 25.15625,
"alnum_prop": 0.6161490683229813,
"repo_name": "dqi2018/python-structure",
"id": "ea3486d1fd864d1519e4b599966564a5951735f0",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2663"
}
],
"symlink_target": ""
} |
"""Fetch managed repos.
This can be useful if you are switching from one arcyd instance to
another, to 'pre-fetch' before actually moving over.
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdcmd_fetch
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import print_function
import sys
import phlsys_git
import phlsys_pid
import phlurl_watcher
import abdi_processrepoargs
import abdi_repoargs
import abdt_fs
import abdt_git
def getFromfilePrefixChars():
    # NOTE(review): presumably feeds argparse's 'fromfile_prefix_chars';
    # None means this subcommand does not read arguments from files.
    return None
def setupParser(parser):
    # The 'fetch' subcommand takes no subcommand-specific arguments.
    pass
def process(args):
    """Fetch all managed repositories without starting the daemon.

    Refuses to run while an arcyd daemon is active (checked via the pid
    file).  For each configured repository, fetches from 'origin' if the
    snoop url indicates new data may be available.

    :args: parsed argparse arguments (unused by this subcommand)
    """
    _ = args  # NOQA
    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            raise Exception("cannot fetch whilst arcyd is running.")

        repo_config_path_list = fs.repo_config_path_list()
        repo_name_config_list = abdi_repoargs.parse_config_file_list(
            repo_config_path_list)

        url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
            fs.layout.urlwatcher_cache_path)

        # Let the user know what's happening before potentially blocking for a
        # while.
        print('Refreshing repository snoop status ..', end=' ')

        # Make sure that the output is actually visible by flushing stdout
        # XXX: Will use 'flush' parameter to 'print()' in Python 3.3
        sys.stdout.flush()

        # BUGFIX: do the (potentially slow) refresh BEFORE reporting 'done';
        # previously 'done' was printed first and the refresh happened after.
        url_watcher_wrapper.watcher.refresh()
        print("done")

        for repo_name, repo_config in repo_name_config_list:
            print(repo_name + ' ..', end=' ')

            # Make sure that the output is actually visible by flushing stdout
            # XXX: Will use 'flush' parameter to 'print()' in Python 3.3
            sys.stdout.flush()

            snoop_url = abdi_repoargs.get_repo_snoop_url(repo_config)

            abd_repo = abdt_git.Repo(
                phlsys_git.Repo(repo_config.repo_path),
                "origin",
                repo_config.repo_desc)

            did_fetch = abdi_processrepoargs.fetch_if_needed(
                url_watcher_wrapper.watcher,
                snoop_url,
                abd_repo,
                repo_config.repo_desc)

            if did_fetch:
                print('fetched')
            else:
                print('skipped')

            # Persist the watcher cache after every repository so progress
            # survives an interruption part-way through the list.
            url_watcher_wrapper.save()
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"content_hash": "0c11e794e8a89cbc56c611d380b88eb8",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 79,
"avg_line_length": 31.27433628318584,
"alnum_prop": 0.5588568194680249,
"repo_name": "valhallasw/phabricator-tools",
"id": "961f0f7d54edf63f5cc55aa91e54a812f4aa20ae",
"size": "3534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/abd/abdcmd_fetch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "342"
},
{
"name": "Puppet",
"bytes": "4246"
},
{
"name": "Python",
"bytes": "964066"
},
{
"name": "Ruby",
"bytes": "2000"
},
{
"name": "Shell",
"bytes": "128202"
}
],
"symlink_target": ""
} |
from .base import CanaryToolsBase
class DataBundles(object):
    """Factory that turns raw API JSON into :class:`DataBundle` objects."""

    def __init__(self, console):
        """Create a DataBundles helper.

        :param console: The Console from which API calls are made
        """
        self.console = console

    def parse(self, data):
        """Parse JSON data into DataBundle objects.

        :param data: JSON data (a dict expected to carry a 'bundles' key)
        :return: A list of DataBundle objects (empty when nothing to parse)
        """
        if not data or 'bundles' not in data:
            return []
        return [DataBundle.parse(self.console, raw)
                for raw in data['bundles']]
class DataBundle(CanaryToolsBase):
    """A single data bundle record.

    Attributes are populated from the JSON payload by the base class:
    settings_key, req_len, bytes_copied, name, checksum, ended_time, tag,
    type_, bundle_size, state, node_id, started_time, created_time and
    updated_time.
    """

    def __init__(self, console, data):
        """Initialize a DataBundle object.

        :param console: The Console from which API calls are made
        :param data: JSON data describing this bundle
        """
        super(DataBundle, self).__init__(console, data)

    def __str__(self):
        """Human-readable representation of this bundle."""
        return "[DataBundle] name: %s;" % self.name
| {
"content_hash": "83d7ce651d665041df747651a7a1ba9d",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 93,
"avg_line_length": 39.30769230769231,
"alnum_prop": 0.571917808219178,
"repo_name": "thinkst/canarytools-python",
"id": "cf789d29f48401125db0c87aa13b4534f1d1ff87",
"size": "2044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canarytools/models/databundles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "69643"
},
{
"name": "Shell",
"bytes": "454"
}
],
"symlink_target": ""
} |
import pytest
from tenable.nessus.schema.pagination import FilterSchema, ListSchema
def test_filter_schema_dict():
    """A dict-form filter must round-trip through the schema unchanged."""
    original = {'filter': 'name', 'quality': 'eq', 'value': 'something'}
    schema = FilterSchema()
    assert schema.dump(schema.load(original)) == original
def test_filter_schema_tuple():
    """A (name, operator, value) tuple must load into the dict form."""
    expected = {'filter': 'name', 'quality': 'eq', 'value': 'something'}
    schema = FilterSchema()
    loaded = schema.load(('name', 'eq', 'something'))
    assert schema.dump(loaded) == expected
def test_list_schema():
    """Loading+dumping a list query flattens filters into dotted keys
    and lowercases sort_order/search_type."""
    schema = ListSchema()
    payload = {
        'limit': 10,
        'offset': 0,
        'sort_by': 'something',
        'sort_order': 'DESC',
        'search_type': 'AND',
        'filters': [
            ('something', 'eq', 'value'),
            {'filter': 'a', 'quality': 'eq', 'value': 's2'},
        ],
    }
    resp = schema.dump(schema.load(payload))
    expected = {
        'limit': 10,
        'offset': 0,
        'sort_by': 'something',
        'sort_order': 'desc',
        'filter.search_type': 'and',
        'filter.0.filter': 'something',
        'filter.0.quality': 'eq',
        'filter.0.value': 'value',
        'filter.1.filter': 'a',
        'filter.1.quality': 'eq',
        'filter.1.value': 's2',
    }
    for key, value in expected.items():
        assert resp[key] == value
| {
"content_hash": "109e8ab05214d45f19b77fe5571d5a37",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 33.146341463414636,
"alnum_prop": 0.5732155997056659,
"repo_name": "tenable/pyTenable",
"id": "9deb5040e1ea968a3068348f5f66930a821543ae",
"size": "1359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/nessus/schema/test_pagination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2769266"
}
],
"symlink_target": ""
} |
import optparse
import re

from scapy.all import *
def findGuest(pkt):
    """Sniff callback: report hotel-guest name and room found in a packet.

    Scans the raw TCP payload for LAST_NAME=... and ROOM_NUMBER=... form
    fields and prints any guest/room pair found.
    """
    raw = pkt.sprintf('%Raw.load%')
    name = re.findall('(?i)LAST_NAME=(.*)&', raw)
    room = re.findall("(?i)ROOM_NUMBER=(.*)'", raw)
    # BUGFIX: require BOTH captures; previously a packet containing only
    # LAST_NAME raised IndexError on room[0].  Parenthesized print keeps
    # this line valid on both Python 2 and 3.
    if name and room:
        print('[+] Found Hotel Guest ' + str(name[0]) +
              ', Room #' + str(room[0]))
def main():
    """Parse the command line and sniff TCP traffic on the chosen
    interface, reporting hotel guests as packets are seen."""
    parser = optparse.OptionParser('usage %prog -i <interface>')
    parser.add_option(
        '-i', dest='interface', type='string',
        help='specify interface to listen on')
    options, args = parser.parse_args()
    if options.interface is None:
        print(parser.usage)
        exit(0)
    conf.iface = options.interface
    try:
        print('[*] Starting Hotel Guest Sniffer.')
        sniff(filter='tcp', prn=findGuest, store=0)
    except KeyboardInterrupt:
        exit(0)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "b2cac5e212b48a62db14aa081bd0a1b8",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 60,
"avg_line_length": 24.857142857142858,
"alnum_prop": 0.5643678160919541,
"repo_name": "psb-seclab/CTFStuff",
"id": "681c0eba68b673257a18ff920761b707a8d21491",
"size": "913",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/violent_python_code/CH5/3-hotelSniff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1004"
},
{
"name": "Python",
"bytes": "210746"
}
],
"symlink_target": ""
} |
"""This code example creates new activity groups.
To determine which activity groups exist, run get_all_activity_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import uuid
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the advertiser company this activity group is associated with.
# (Placeholder - replace with a real company ID before running the example.)
ADVERTISER_COMPANY_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
def main(client, advertiser_company_id):
    """Create one short-term and one long-term activity group.

    :param client: an initialized AdManagerClient
    :param advertiser_company_id: ID of the advertiser company the new
        activity groups are associated with
    """
    # Initialize appropriate service.
    activity_group_service = client.GetService('ActivityGroupService',
                                               version='v201808')

    # Build one request per lookback window (clicks/impressions, in days).
    requests = [
        {
            'name': '%s activity group #%s' % (term, uuid.uuid4()),
            'companyIds': [advertiser_company_id],
            'clicksLookback': lookback,
            'impressionsLookback': lookback,
        }
        for term, lookback in (('Short-term', '1'), ('Long-term', '30'))
    ]

    # Create the activity groups on the server.
    activity_groups = activity_group_service.createActivityGroups(requests)

    # Display results.
    for activity_group in activity_groups:
        print ('Activity group with ID "%s" and name "%s" was created.'
               % (activity_group['id'], activity_group['name']))
# LoadFromStorage pulls credentials from the googleads.yaml file
# (see the module docstring above).
if __name__ == '__main__':
    # Initialize client object.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client, ADVERTISER_COMPANY_ID)
| {
"content_hash": "1e928b11ce5720373693986d09739ee9",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 78,
"avg_line_length": 33.8,
"alnum_prop": 0.6982248520710059,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "195585f0beb24d0ed93e05870fbb03d6be671cac",
"size": "2481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ad_manager/v201808/activity_group_service/create_activity_groups.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
} |
import io
import json
import pytest
import stix2
from ...exceptions import InvalidValueError
from .constants import IDENTITY_ID
EXPECTED_BUNDLE = """{
"type": "bundle",
"id": "bundle--00000000-0000-4000-8000-000000000007",
"spec_version": "2.0",
"objects": [
{
"type": "indicator",
"id": "indicator--00000000-0000-4000-8000-000000000001",
"created": "2017-01-01T12:34:56.000Z",
"modified": "2017-01-01T12:34:56.000Z",
"pattern": "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
"valid_from": "2017-01-01T12:34:56Z",
"labels": [
"malicious-activity"
]
},
{
"type": "malware",
"id": "malware--00000000-0000-4000-8000-000000000003",
"created": "2017-01-01T12:34:56.000Z",
"modified": "2017-01-01T12:34:56.000Z",
"name": "Cryptolocker",
"labels": [
"ransomware"
]
},
{
"type": "relationship",
"id": "relationship--00000000-0000-4000-8000-000000000005",
"created": "2017-01-01T12:34:56.000Z",
"modified": "2017-01-01T12:34:56.000Z",
"relationship_type": "indicates",
"source_ref": "indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7",
"target_ref": "malware--9c4638ec-f1de-4ddb-abf4-1b760417654e"
}
]
}"""
# The same fixture as EXPECTED_BUNDLE, but as a Python dict for comparing
# against json.loads() of the compact serialization.
EXPECTED_BUNDLE_DICT = {
    "type": "bundle",
    "id": "bundle--00000000-0000-4000-8000-000000000007",
    "spec_version": "2.0",
    "objects": [
        {
            "type": "indicator",
            "id": "indicator--00000000-0000-4000-8000-000000000001",
            "created": "2017-01-01T12:34:56.000Z",
            "modified": "2017-01-01T12:34:56.000Z",
            "pattern": "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
            "valid_from": "2017-01-01T12:34:56Z",
            "labels": [
                "malicious-activity",
            ],
        },
        {
            "type": "malware",
            "id": "malware--00000000-0000-4000-8000-000000000003",
            "created": "2017-01-01T12:34:56.000Z",
            "modified": "2017-01-01T12:34:56.000Z",
            "name": "Cryptolocker",
            "labels": [
                "ransomware",
            ],
        },
        {
            "type": "relationship",
            "id": "relationship--00000000-0000-4000-8000-000000000005",
            "created": "2017-01-01T12:34:56.000Z",
            "modified": "2017-01-01T12:34:56.000Z",
            "relationship_type": "indicates",
            "source_ref": "indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7",
            "target_ref": "malware--9c4638ec-f1de-4ddb-abf4-1b760417654e",
        },
    ],
}
def test_empty_bundle():
    """A Bundle with no members still gets type/id, but no 'objects'."""
    empty = stix2.v20.Bundle()

    assert empty.type == "bundle"
    assert empty.id.startswith("bundle--")
    with pytest.raises(AttributeError):
        assert empty.objects
def test_bundle_with_wrong_type():
    """Constructing a Bundle with a bogus 'type' raises InvalidValueError."""
    with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
        stix2.v20.Bundle(type="not-a-bundle")

    err = excinfo.value
    assert err.cls == stix2.v20.Bundle
    assert err.prop_name == "type"
    assert err.reason == "must equal 'bundle'."
    assert str(err) == "Invalid value for Bundle 'type': must equal 'bundle'."
def test_bundle_id_must_start_with_bundle():
    """Bundle ids must carry the 'bundle--' prefix."""
    with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
        stix2.v20.Bundle(id='my-prefix--')

    err = excinfo.value
    assert err.cls == stix2.v20.Bundle
    assert err.prop_name == "id"
    assert err.reason == "must start with 'bundle--'."
    assert str(err) == "Invalid value for Bundle 'id': must start with 'bundle--'."
def test_create_bundle_fp_serialize_pretty(indicator, malware, relationship):
    """Pretty fp_serialize writes the same text as serialize(pretty=True)."""
    bundle = stix2.v20.Bundle(objects=[indicator, malware, relationship])
    sink = io.StringIO()
    bundle.fp_serialize(sink, pretty=True)

    assert bundle.serialize(pretty=True) == EXPECTED_BUNDLE
    assert sink.getvalue() == EXPECTED_BUNDLE
def test_create_bundle_fp_serialize_nonpretty(indicator, malware, relationship):
    """Sorted-key fp_serialize matches serialize(sort_keys=True)."""
    bundle = stix2.v20.Bundle(objects=[indicator, malware, relationship])
    sink = io.StringIO()
    bundle.fp_serialize(sink, sort_keys=True)

    expected = json.dumps(json.loads(EXPECTED_BUNDLE), sort_keys=True)
    assert bundle.serialize(sort_keys=True) == expected
    assert sink.getvalue() == expected
def test_create_bundle1(indicator, malware, relationship):
    """Objects passed via the 'objects' kwarg serialize to the expected text."""
    bundle = stix2.v20.Bundle(objects=[indicator, malware, relationship])

    # The assertion was duplicated verbatim in the original; once is enough.
    assert bundle.serialize(pretty=True) == EXPECTED_BUNDLE
def test_create_bundle2(indicator, malware, relationship):
    """Compact serialization round-trips to the expected dict."""
    bundle = stix2.v20.Bundle(objects=[indicator, malware, relationship])
    serialized = bundle.serialize()

    assert json.loads(serialized) == EXPECTED_BUNDLE_DICT
def test_create_bundle_with_positional_args(indicator, malware, relationship):
    """Objects may be given as bare positional arguments."""
    created = stix2.v20.Bundle(indicator, malware, relationship)
    assert created.serialize(pretty=True) == EXPECTED_BUNDLE


def test_create_bundle_with_positional_listarg(indicator, malware, relationship):
    """A single positional list of objects is also accepted."""
    created = stix2.v20.Bundle([indicator, malware, relationship])
    assert created.serialize(pretty=True) == EXPECTED_BUNDLE


def test_create_bundle_with_listarg_and_positional_arg(indicator, malware, relationship):
    """A positional list may be mixed with further positional objects."""
    created = stix2.v20.Bundle([indicator, malware], relationship)
    assert created.serialize(pretty=True) == EXPECTED_BUNDLE


def test_create_bundle_with_listarg_and_kwarg(indicator, malware, relationship):
    """A positional list may be combined with the 'objects' kwarg."""
    created = stix2.v20.Bundle([indicator, malware], objects=[relationship])
    assert created.serialize(pretty=True) == EXPECTED_BUNDLE


def test_create_bundle_with_arg_listarg_and_kwarg(indicator, malware, relationship):
    """Lists, bare objects and the 'objects' kwarg all combine."""
    created = stix2.v20.Bundle([indicator], malware, objects=[relationship])
    assert created.serialize(pretty=True) == EXPECTED_BUNDLE
def test_create_bundle_invalid(indicator, malware, relationship):
    """Bundles reject non-object members with descriptive reasons."""
    cases = [
        ([1], "This property may only contain a dictionary or object"),
        ([{}], "This property may only contain a non-empty dictionary or object"),
        ([{'type': 'bundle'}], 'This property may not contain a Bundle object'),
    ]
    for objects, reason in cases:
        with pytest.raises(InvalidValueError) as excinfo:
            stix2.v20.Bundle(objects=objects)
        assert excinfo.value.reason == reason
@pytest.mark.parametrize("version", ["2.0"])
def test_parse_bundle(version):
    """Parsing the expected JSON yields a Bundle with typed members."""
    parsed = stix2.parse(EXPECTED_BUNDLE, version=version)

    assert parsed.type == "bundle"
    assert parsed.id.startswith("bundle--")
    assert isinstance(parsed.objects[0], stix2.v20.Indicator)
    assert [obj.type for obj in parsed.objects[:3]] == [
        'indicator', 'malware', 'relationship']
def test_parse_unknown_type():
    """Objects of unregistered types cannot be parsed."""
    unknown = {
        "type": "other",
        "id": "other--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
        "created": "2016-04-06T20:03:00Z",
        "modified": "2016-04-06T20:03:00Z",
        "created_by_ref": IDENTITY_ID,
        "description": "Campaign by Green Group against a series of targets in the financial services sector.",
        "name": "Green Group Attacks Against Finance",
    }

    with pytest.raises(stix2.exceptions.ParseError) as excinfo:
        stix2.parse(unknown, version="2.0")

    assert str(excinfo.value) == (
        "Can't parse unknown object type 'other'! For custom types, use the "
        "CustomObject decorator."
    )
def test_stix_object_property():
    """STIXObjectProperty.clean passes a valid SDO through untouched."""
    prop = stix2.properties.STIXObjectProperty(spec_version='2.0')
    person = stix2.v20.Identity(name="test", identity_class="individual")

    assert prop.clean(person, False) == (person, False)
def test_bundle_with_different_spec_objects():
    """A 2.0 Bundle refuses members from another spec version."""
    # This is a 2.0 case only...
    spec_21_indicator = {
        "spec_version": "2.1",
        "type": "indicator",
        "id": "indicator--00000000-0000-4000-8000-000000000001",
        "created": "2017-01-01T12:34:56.000Z",
        "modified": "2017-01-01T12:34:56.000Z",
        "pattern": "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
        "valid_from": "2017-01-01T12:34:56Z",
        "labels": ["malicious-activity"],
    }
    plain_malware = {
        "type": "malware",
        "id": "malware--00000000-0000-4000-8000-000000000003",
        "created": "2017-01-01T12:34:56.000Z",
        "modified": "2017-01-01T12:34:56.000Z",
        "name": "Cryptolocker",
        "labels": ["ransomware"],
    }

    with pytest.raises(InvalidValueError) as excinfo:
        stix2.v20.Bundle(objects=[spec_21_indicator, plain_malware])

    assert ("Spec version 2.0 bundles don't yet support containing objects "
            "of a different spec version.") in str(excinfo.value)
def test_bundle_obj_id_found():
    """get_obj returns the single object matching the given id."""
    bundle = stix2.parse(EXPECTED_BUNDLE)

    matches = bundle.get_obj("malware--00000000-0000-4000-8000-000000000003")
    assert matches == [bundle.objects[1]]
# Fixture: a bundle that deliberately contains TWO versions of the same
# malware id (different 'modified' timestamps); get_obj must return both,
# in document order.
@pytest.mark.parametrize(
    "bundle_data", [{
        "type": "bundle",
        "id": "bundle--00000000-0000-4000-8000-000000000007",
        "spec_version": "2.0",
        "objects": [
            {
                "type": "indicator",
                "id": "indicator--00000000-0000-4000-8000-000000000001",
                "created": "2017-01-01T12:34:56.000Z",
                "modified": "2017-01-01T12:34:56.000Z",
                "pattern": "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
                "valid_from": "2017-01-01T12:34:56Z",
                "labels": [
                    "malicious-activity",
                ],
            },
            {
                "type": "malware",
                "id": "malware--00000000-0000-4000-8000-000000000003",
                "created": "2017-01-01T12:34:56.000Z",
                "modified": "2017-01-01T12:34:56.000Z",
                "name": "Cryptolocker1",
                "labels": [
                    "ransomware",
                ],
            },
            # Same id as the previous malware, but a later 'modified'
            # timestamp - i.e. a second version of the same object.
            {
                "type": "malware",
                "id": "malware--00000000-0000-4000-8000-000000000003",
                "created": "2017-01-01T12:34:56.000Z",
                "modified": "2017-12-21T12:34:56.000Z",
                "name": "CryptolockerOne",
                "labels": [
                    "ransomware",
                ],
            },
            {
                "type": "relationship",
                "id": "relationship--00000000-0000-4000-8000-000000000005",
                "created": "2017-01-01T12:34:56.000Z",
                "modified": "2017-01-01T12:34:56.000Z",
                "relationship_type": "indicates",
                "source_ref": "indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7",
                "target_ref": "malware--9c4638ec-f1de-4ddb-abf4-1b760417654e",
            },
        ],
    }],
)
def test_bundle_objs_ids_found(bundle_data):
    """get_obj returns every object version matching the id, in order."""
    bundle = stix2.parse(bundle_data)
    mal_list = bundle.get_obj("malware--00000000-0000-4000-8000-000000000003")
    assert bundle.objects[1] == mal_list[0]
    assert bundle.objects[2] == mal_list[1]
    assert len(mal_list) == 2
def test_bundle_getitem_overload_property_found():
    """__getitem__ resolves plain property names first."""
    bundle = stix2.parse(EXPECTED_BUNDLE)

    assert bundle.type == "bundle"
    assert bundle['type'] == "bundle"
def test_bundle_getitem_overload_obj_id_found():
    """__getitem__ with an object id behaves like get_obj."""
    bundle = stix2.parse(EXPECTED_BUNDLE)

    matches = bundle["malware--00000000-0000-4000-8000-000000000003"]
    assert matches == [bundle.objects[1]]
def test_bundle_obj_id_not_found():
    """get_obj raises KeyError for an id no member carries."""
    bundle = stix2.parse(EXPECTED_BUNDLE)

    with pytest.raises(KeyError) as excinfo:
        bundle.get_obj('non existent')
    assert "does not match the id property of any of the bundle" in str(excinfo.value)
def test_bundle_getitem_overload_obj_id_not_found():
    """__getitem__ raises KeyError when the key is neither property nor id."""
    bundle = stix2.parse(EXPECTED_BUNDLE)

    with pytest.raises(KeyError) as excinfo:
        bundle['non existent']
    assert "neither a property on the bundle nor does it match the id property" in str(excinfo.value)
| {
"content_hash": "d40689b45da67dce9d924337d8f7f594",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 125,
"avg_line_length": 35.226890756302524,
"alnum_prop": 0.6079039440203562,
"repo_name": "oasis-open/cti-python-stix2",
"id": "07fa24daccd95fa7686561703078452f7e32d813",
"size": "12576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stix2/test/v20/test_bundle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1737742"
}
],
"symlink_target": ""
} |
"""Tests for the ``influxdb_get_postgresql_size`` management command."""
from django.test import TestCase
from mock import patch
from ..management.commands.influxdb_get_postgresql_size import Command
class InfluxdbGetPostgresqlSizeTestCase(TestCase):
    """Tests for the ``influxdb_get_postgresql_size`` management command."""
    longMessage = True

    # Dotted path of the module under test; both patch targets live here.
    _MODULE = (
        'influxdb_metrics.management.commands.influxdb_get_postgresql_size')

    def setUp(self):
        self.patch_write_points = patch(self._MODULE + '.write_points')
        self.patch_get_database_size = patch(
            self._MODULE + '.get_database_size')
        self.mock_write_points = self.patch_write_points.start()
        self.mock_get_database_size = self.patch_get_database_size.start()

    def tearDown(self):
        self.patch_get_database_size.stop()
        self.patch_write_points.stop()

    def test_command(self):
        Command().handle('db_role', 'db_user')

        written_points = self.mock_write_points.call_args[0][0]
        size_call_args = self.mock_get_database_size.call_args[0]
        self.assertEqual(
            written_points[0]['measurement'],
            'postgresql_size', msg=(
                'Should construct a data dict with the correct columns'))
        self.assertEqual(
            size_call_args,
            ('db_role', 'db_user'),
            msg=('Should call `get_database_size` with correct parameters'))
| {
"content_hash": "c0a83c00f8ea8e4bcab87ad5ee0be4c5",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 38.19512195121951,
"alnum_prop": 0.6411238825031929,
"repo_name": "bitlabstudio/django-influxdb-metrics",
"id": "699364192709650441def323a22ddf3b2e089a93",
"size": "1566",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "influxdb_metrics/tests/influxdb_get_postgresql_size_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "508"
},
{
"name": "Makefile",
"bytes": "325"
},
{
"name": "Python",
"bytes": "29791"
},
{
"name": "Shell",
"bytes": "128"
}
],
"symlink_target": ""
} |
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class RelatedData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'alert_description': 'str',
'anomaly_chart_link': 'str',
'common_dimensions': 'list[str]',
'common_metrics': 'list[str]',
'common_sources': 'list[str]',
'enhanced_score': 'float',
'related_id': 'str',
'summary': 'str'
}
attribute_map = {
'alert_description': 'alertDescription',
'anomaly_chart_link': 'anomalyChartLink',
'common_dimensions': 'commonDimensions',
'common_metrics': 'commonMetrics',
'common_sources': 'commonSources',
'enhanced_score': 'enhancedScore',
'related_id': 'relatedId',
'summary': 'summary'
}
    def __init__(self, alert_description=None, anomaly_chart_link=None, common_dimensions=None, common_metrics=None, common_sources=None, enhanced_score=None, related_id=None, summary=None, _configuration=None):  # noqa: E501
        """RelatedData - a model defined in Swagger"""  # noqa: E501
        # Fall back to a default client configuration when none is supplied.
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        # Private backing fields for the generated @property accessors.
        self._alert_description = None
        self._anomaly_chart_link = None
        self._common_dimensions = None
        self._common_metrics = None
        self._common_sources = None
        self._enhanced_score = None
        self._related_id = None
        self._summary = None
        self.discriminator = None
        # Assign through the property setters (not the backing fields);
        # None-valued arguments are simply omitted.
        if alert_description is not None:
            self.alert_description = alert_description
        if anomaly_chart_link is not None:
            self.anomaly_chart_link = anomaly_chart_link
        if common_dimensions is not None:
            self.common_dimensions = common_dimensions
        if common_metrics is not None:
            self.common_metrics = common_metrics
        if common_sources is not None:
            self.common_sources = common_sources
        if enhanced_score is not None:
            self.enhanced_score = enhanced_score
        if related_id is not None:
            self.related_id = related_id
        if summary is not None:
            self.summary = summary
    # ------------------------------------------------------------------
    # Generated accessors: each property below simply wraps its private
    # backing field (self._<name>).  Auto-generated by swagger-codegen -
    # do not hand-edit beyond comments.
    # ------------------------------------------------------------------
    @property
    def alert_description(self):
        """Gets the alert_description of this RelatedData.  # noqa: E501
        If this event is generated by an alert, the description of that alert.  # noqa: E501
        :return: The alert_description of this RelatedData.  # noqa: E501
        :rtype: str
        """
        return self._alert_description
    @alert_description.setter
    def alert_description(self, alert_description):
        """Sets the alert_description of this RelatedData.
        If this event is generated by an alert, the description of that alert.  # noqa: E501
        :param alert_description: The alert_description of this RelatedData.  # noqa: E501
        :type: str
        """
        self._alert_description = alert_description
    @property
    def anomaly_chart_link(self):
        """Gets the anomaly_chart_link of this RelatedData.  # noqa: E501
        Chart Link of the anomaly to which this event is related  # noqa: E501
        :return: The anomaly_chart_link of this RelatedData.  # noqa: E501
        :rtype: str
        """
        return self._anomaly_chart_link
    @anomaly_chart_link.setter
    def anomaly_chart_link(self, anomaly_chart_link):
        """Sets the anomaly_chart_link of this RelatedData.
        Chart Link of the anomaly to which this event is related  # noqa: E501
        :param anomaly_chart_link: The anomaly_chart_link of this RelatedData.  # noqa: E501
        :type: str
        """
        self._anomaly_chart_link = anomaly_chart_link
    @property
    def common_dimensions(self):
        """Gets the common_dimensions of this RelatedData.  # noqa: E501
        Set of common dimensions between the 2 events, presented in key=value format  # noqa: E501
        :return: The common_dimensions of this RelatedData.  # noqa: E501
        :rtype: list[str]
        """
        return self._common_dimensions
    @common_dimensions.setter
    def common_dimensions(self, common_dimensions):
        """Sets the common_dimensions of this RelatedData.
        Set of common dimensions between the 2 events, presented in key=value format  # noqa: E501
        :param common_dimensions: The common_dimensions of this RelatedData.  # noqa: E501
        :type: list[str]
        """
        self._common_dimensions = common_dimensions
    @property
    def common_metrics(self):
        """Gets the common_metrics of this RelatedData.  # noqa: E501
        Set of common metrics/labels between the 2 events or anomalies  # noqa: E501
        :return: The common_metrics of this RelatedData.  # noqa: E501
        :rtype: list[str]
        """
        return self._common_metrics
    @common_metrics.setter
    def common_metrics(self, common_metrics):
        """Sets the common_metrics of this RelatedData.
        Set of common metrics/labels between the 2 events or anomalies  # noqa: E501
        :param common_metrics: The common_metrics of this RelatedData.  # noqa: E501
        :type: list[str]
        """
        self._common_metrics = common_metrics
    @property
    def common_sources(self):
        """Gets the common_sources of this RelatedData.  # noqa: E501
        Set of common sources between the 2 events or anomalies  # noqa: E501
        :return: The common_sources of this RelatedData.  # noqa: E501
        :rtype: list[str]
        """
        return self._common_sources
    @common_sources.setter
    def common_sources(self, common_sources):
        """Sets the common_sources of this RelatedData.
        Set of common sources between the 2 events or anomalies  # noqa: E501
        :param common_sources: The common_sources of this RelatedData.  # noqa: E501
        :type: list[str]
        """
        self._common_sources = common_sources
    @property
    def enhanced_score(self):
        """Gets the enhanced_score of this RelatedData.  # noqa: E501
        Enhanced score to sort related events and anomalies  # noqa: E501
        :return: The enhanced_score of this RelatedData.  # noqa: E501
        :rtype: float
        """
        return self._enhanced_score
    @enhanced_score.setter
    def enhanced_score(self, enhanced_score):
        """Sets the enhanced_score of this RelatedData.
        Enhanced score to sort related events and anomalies  # noqa: E501
        :param enhanced_score: The enhanced_score of this RelatedData.  # noqa: E501
        :type: float
        """
        self._enhanced_score = enhanced_score
    @property
    def related_id(self):
        """Gets the related_id of this RelatedData.  # noqa: E501
        ID of the event to which this event is related  # noqa: E501
        :return: The related_id of this RelatedData.  # noqa: E501
        :rtype: str
        """
        return self._related_id
@related_id.setter
def related_id(self, related_id):
"""Sets the related_id of this RelatedData.
ID of the event to which this event is related # noqa: E501
:param related_id: The related_id of this RelatedData. # noqa: E501
:type: str
"""
self._related_id = related_id
@property
def summary(self):
"""Gets the summary of this RelatedData. # noqa: E501
Text summary of why the two events are related # noqa: E501
:return: The summary of this RelatedData. # noqa: E501
:rtype: str
"""
return self._summary
@summary.setter
def summary(self, summary):
"""Sets the summary of this RelatedData.
Text summary of why the two events are related # noqa: E501
:param summary: The summary of this RelatedData. # noqa: E501
:type: str
"""
self._summary = summary
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RelatedData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RelatedData):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, RelatedData):
return True
return self.to_dict() != other.to_dict()
| {
"content_hash": "2b72782accf26473c6e5a21f2fe9e890",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 409,
"avg_line_length": 33.12852664576803,
"alnum_prop": 0.6095760787282362,
"repo_name": "wavefrontHQ/python-client",
"id": "b2c107da1e525c18f85b09bd85aaeb0ecdd9e55d",
"size": "10585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wavefront_api_client/models/related_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4642252"
},
{
"name": "Shell",
"bytes": "3458"
}
],
"symlink_target": ""
} |
"""
Form Widget classes specific to the Django admin site.
"""
import copy
from django import forms
from django.forms.widgets import RadioFieldRenderer
from django.forms.util import flatatt
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
class FilteredSelectMultiple(forms.SelectMultiple):
    """
    A SelectMultiple with a JavaScript filter interface.

    Note that the resulting JavaScript assumes that the jsi18n
    catalog has been loaded in the page.
    """
    class Media:
        # Admin JS implementing the two-box chooser/filter UI.
        js = (settings.ADMIN_MEDIA_PREFIX + "js/core.js",
              settings.ADMIN_MEDIA_PREFIX + "js/SelectBox.js",
              settings.ADMIN_MEDIA_PREFIX + "js/SelectFilter2.js")
    def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
        # verbose_name: label shown in the filter header.
        # is_stacked: render the two boxes vertically instead of side by side.
        self.verbose_name = verbose_name
        self.is_stacked = is_stacked
        super(FilteredSelectMultiple, self).__init__(attrs, choices)
    def render(self, name, value, attrs=None, choices=()):
        if attrs is None: attrs = {}
        attrs['class'] = 'selectfilter'
        # Deliberately no separator: the JS looks for the combined
        # "selectfilterstacked" class name.
        if self.is_stacked: attrs['class'] += 'stacked'
        output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
        # Initialise the filter widget once the page has loaded.
        output.append(u'<script type="text/javascript">addEvent(window, "load", function(e) {')
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        output.append(u'SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n' % \
            (name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), settings.ADMIN_MEDIA_PREFIX))
        return mark_safe(u''.join(output))
class AdminDateWidget(forms.DateInput):
    """A DateInput styled for the admin, with the calendar shortcut JS."""
    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
              settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
    def __init__(self, attrs=None, format=None):
        # BUG FIX: the old signature used a mutable default ``attrs={}`` and
        # then discarded the argument entirely; caller-supplied attrs are now
        # merged over the admin defaults.
        final_attrs = {'class': 'vDateField', 'size': '10'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
    """A TimeInput styled for the admin, with the clock shortcut JS."""
    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
              settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
    def __init__(self, attrs=None, format=None):
        # BUG FIX: the old signature used a mutable default ``attrs={}`` and
        # then discarded the argument entirely; caller-supplied attrs are now
        # merged over the admin defaults.
        final_attrs = {'class': 'vTimeField', 'size': '8'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    def __init__(self, attrs=None):
        widgets = [AdminDateWidget, AdminTimeWidget]
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        forms.MultiWidget.__init__(self, widgets, attrs)
    def format_output(self, rendered_widgets):
        # Wrap the date and time sub-widgets in a <p> with translated labels.
        return mark_safe(u'<p class="datetime">%s %s<br />%s %s</p>' % \
            (_('Date:'), rendered_widgets[0], _('Time:'), rendered_widgets[1]))
class AdminRadioFieldRenderer(RadioFieldRenderer):
    """Renders a radio group as a <ul>, which the admin CSS expects."""
    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        return mark_safe(u'<ul%s>\n%s\n</ul>' % (
            flatatt(self.attrs),
            u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self]))
        )
class AdminRadioSelect(forms.RadioSelect):
    # RadioSelect variant that uses the admin's <ul>-based renderer above.
    renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
    # Wrap the stock clearable-file markup in admin-specific containers so
    # the admin CSS can style the upload/clear controls.
    template_with_initial = (u'<p class="file-upload">%s</p>'
                            % forms.ClearableFileInput.template_with_initial)
    template_with_clear = (u'<span class="clearable-file-input">%s</span>'
                          % forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
    """
    Converts the type of lookups specified in a ForeignKey limit_choices_to
    attribute to a dictionary of query parameters.

    Lists and tuples become comma-joined strings, booleans become '0'/'1'
    (mirroring BooleanField.get_prep_lookup), and anything else is coerced
    to text.  Returns {} for empty or non-mapping input.
    """
    params = {}
    if lookups and hasattr(lookups, 'items'):
        for k, v in lookups.items():
            # Generalised from list-only: tuples are equally valid
            # limit_choices_to values.
            if isinstance(v, (list, tuple)):
                v = u','.join([str(x) for x in v])
            elif isinstance(v, bool):
                # See django.db.fields.BooleanField.get_prep_lookup
                v = ('0', '1')[v]
            else:
                v = unicode(v)
            # Assign directly instead of the old build-list/dict/update dance.
            params[k] = v
    return params
class ForeignKeyRawIdWidget(forms.TextInput):
    """
    A Widget for displaying ForeignKeys in the "raw_id" interface rather than
    in a <select> box.
    """
    def __init__(self, rel, attrs=None, using=None):
        # rel: the relation descriptor (rel.to is the related model).
        # using: database alias used when resolving the label for a value.
        self.rel = rel
        self.db = using
        super(ForeignKeyRawIdWidget, self).__init__(attrs)
    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        # Relative URL assumes the admin changelist lives three levels up
        # from the current page -- NOTE(review): pre-namespaced-URL layout.
        related_url = '../../../%s/%s/' % (self.rel.to._meta.app_label, self.rel.to._meta.object_name.lower())
        params = self.url_parameters()
        if params:
            url = u'?' + u'&amp;'.join([u'%s=%s' % (k, v) for k, v in params.items()])
        else:
            url = u''
        if "class" not in attrs:
            attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript looks for this hook.
        output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)]
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        output.append(u'<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> ' % \
            (related_url, url, name))
        output.append(u'<img src="%simg/admin/selector-search.gif" width="16" height="16" alt="%s" /></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Lookup')))
        if value:
            # Show a human-readable label for the currently selected object.
            output.append(self.label_for_value(value))
        return mark_safe(u''.join(output))
    def base_url_parameters(self):
        # Query parameters derived from the relation's limit_choices_to.
        return url_params_from_lookup_dict(self.rel.limit_choices_to)
    def url_parameters(self):
        from django.contrib.admin.views.main import TO_FIELD_VAR
        params = self.base_url_parameters()
        # Tell the popup which field the lookup should return.
        params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
        return params
    def label_for_value(self, value):
        """Return ' <strong>label</strong>' for *value*, or '' if it does
        not resolve to an object."""
        key = self.rel.get_related_field().name
        try:
            obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
            return '&nbsp;<strong>%s</strong>' % escape(truncate_words(obj, 14))
        except (ValueError, self.rel.to.DoesNotExist):
            # ValueError: value has the wrong type for the lookup field.
            return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
    """
    A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
    in a <select multiple> box.
    """
    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        attrs['class'] = 'vManyToManyRawIdAdminField'
        # The text input holds a comma-separated list of primary keys.
        if value:
            value = ','.join([force_unicode(v) for v in value])
        else:
            value = ''
        return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
    def url_parameters(self):
        # Unlike the FK widget, no TO_FIELD_VAR is added for m2m lookups.
        return self.base_url_parameters()
    def label_for_value(self, value):
        # No single label makes sense for a list of ids.
        return ''
    def value_from_datadict(self, data, files, name):
        # Returns a list of pk strings, or None when the field is absent/empty.
        value = data.get(name)
        if value:
            return value.split(',')
    def _has_changed(self, initial, data):
        """Compare pk lists element-wise as text, treating None as empty."""
        if initial is None:
            initial = []
        if data is None:
            data = []
        if len(initial) != len(data):
            return True
        for pk1, pk2 in zip(initial, data):
            if force_unicode(pk1) != force_unicode(pk2):
                return True
        return False
class RelatedFieldWidgetWrapper(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.

    It proxies the widget protocol (render, media, attrs, value lookup) to
    the wrapped widget and, when permitted, appends an "add another" popup
    link after the rendered markup.
    """
    def __init__(self, widget, rel, admin_site, can_add_related=None):
        # Mirror the wrapped widget's public surface so form machinery that
        # introspects these attributes keeps working.
        self.is_hidden = widget.is_hidden
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = rel.to in admin_site._registry
        self.can_add_related = can_add_related
        # so we can check if the related object is registered with this AdminSite
        self.admin_site = admin_site
    def __deepcopy__(self, memo):
        # Deep-copy only the wrapped widget; everything else is shared.
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj
    def _media(self):
        return self.widget.media
    media = property(_media)
    def render(self, name, value, *args, **kwargs):
        rel_to = self.rel.to
        info = (rel_to._meta.app_label, rel_to._meta.object_name.lower())
        try:
            related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name)
        except NoReverseMatch:
            # Pre-URL-namespace fallback: build the add URL by hand.
            info = (self.admin_site.root_path, rel_to._meta.app_label, rel_to._meta.object_name.lower())
            related_url = '%s%s/%s/add/' % info
        self.widget.choices = self.choices
        output = [self.widget.render(name, value, *args, **kwargs)]
        if self.can_add_related:
            # TODO: "id_" is hard-coded here. This should instead use the correct
            # API to determine the ID dynamically.
            output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
                (related_url, name))
            output.append(u'<img src="%simg/admin/icon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Add Another')))
        return mark_safe(u''.join(output))
    def build_attrs(self, extra_attrs=None, **kwargs):
        "Helper function for building an attribute dictionary."
        # BUG FIX: this previously passed extra_attrs=None unconditionally,
        # silently discarding any attributes the caller supplied.
        self.attrs = self.widget.build_attrs(extra_attrs=extra_attrs, **kwargs)
        return self.attrs
    def value_from_datadict(self, data, files, name):
        return self.widget.value_from_datadict(data, files, name)
    def _has_changed(self, initial, data):
        return self.widget._has_changed(initial, data)
    def id_for_label(self, id_):
        return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
    """Textarea carrying the admin's ``vLargeTextField`` CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vLargeTextField'}
        merged.update(attrs or {})
        super(AdminTextareaWidget, self).__init__(attrs=merged)
class AdminTextInputWidget(forms.TextInput):
    """TextInput carrying the admin's ``vTextField`` CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vTextField'}
        merged.update(attrs or {})
        super(AdminTextInputWidget, self).__init__(attrs=merged)
class AdminURLFieldWidget(forms.TextInput):
    """TextInput carrying the admin's ``vURLField`` CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vURLField'}
        merged.update(attrs or {})
        super(AdminURLFieldWidget, self).__init__(attrs=merged)
class AdminIntegerFieldWidget(forms.TextInput):
    """TextInput carrying the admin's ``vIntegerField`` CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vIntegerField'}
        merged.update(attrs or {})
        super(AdminIntegerFieldWidget, self).__init__(attrs=merged)
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """TextInput carrying the admin's ``vCommaSeparatedIntegerField`` class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vCommaSeparatedIntegerField'}
        merged.update(attrs or {})
        super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=merged)
| {
"content_hash": "e88af2f2b3acc39b92988b95e06aa43a",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 157,
"avg_line_length": 40.78983050847457,
"alnum_prop": 0.6201279813845259,
"repo_name": "jamespacileo/django-france",
"id": "f210d4ec79783a2331b8bdb514a577dacb8b525c",
"size": "12033",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/contrib/admin/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "98723"
},
{
"name": "Python",
"bytes": "6883445"
},
{
"name": "Shell",
"bytes": "4009"
}
],
"symlink_target": ""
} |
"""
Copyright 2016 Udey Rishi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import subprocess
import signal
import logging
import sys
import os
from threading import Timer
from time import sleep
from bo.settings import BO_MANAGER_SETTINGS
# Command used to launch the crawler; split on whitespace before exec.
BO_LAUNCH_COMMAND = 'scrapy crawl bo'
class BoManager:
    """
    The main manager class for Bo that runs the scrapy crawler as a
    subprocess repeatedly, indefinitely, until SIGINT is received.
    """
    def __init__(self, logger, force_kill_delay_seconds=10):
        self.__last_output = ''
        self.__sigint_received = False
        self.__bo_pid = None
        self.__logger = logger
        # Fallback timer: if scrapy ignores the first SIGINT, send another
        # kill after this many seconds.
        self.__final_kill_timer = Timer(force_kill_delay_seconds, self.__force_kill_bo)
        self.__killed = False
        signal.signal(signal.SIGINT, lambda sig, frame: self.__sigint_handler(sig, frame))
    def run(self):
        """Run the crawler in a loop, restarting it after each clean exit."""
        while not self.__sigint_received:
            self.__logger.log(logging.CRITICAL, 'Starting Bo...')
            rc = self.__run_command(BO_LAUNCH_COMMAND)
            if rc == 0:
                if self.__sigint_received:
                    break
                delay = BO_MANAGER_SETTINGS.get('retry_delay_seconds', 10)
                self.__logger.log(logging.CRITICAL, 'Restarting Bo in {0} seconds...'.format(delay))
                sleep(delay)
            else:
                # Non-zero exit: stop retrying rather than loop on a failure.
                break
        # Cancel the delayed force-kill if we terminated cleanly.
        self.__final_kill_timer.cancel()
        self.__logger.log(logging.CRITICAL, 'Bo successfully shut down')
    def __run_command(self, command):
        """Spawn *command*, stream its stdout line by line, and return its
        exit code."""
        process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, preexec_fn=os.setpgrp)
        self.__bo_pid = process.pid
        while True:
            output = process.stdout.readline()
            # BUG FIX: without text mode the pipe yields bytes on Python 3,
            # so the old `output == ''` EOF test never matched and the loop
            # could spin after the child died.  `not output` covers both
            # str and bytes.
            if not output and process.poll() is not None:
                break
            if output:
                print(output)
                self.__last_output = output
        return process.poll()
    def __sigint_handler(self, sig, frame):
        """First-SIGINT handler: forward the interrupt to scrapy once and
        arm the delayed force-kill."""
        if not self.__killed:
            self.__killed = True
            self.__logger.log(logging.CRITICAL, 'SIGINT received. Shutting down scrapy')
            self.__sigint_received = True
            if self.__bo_pid is not None:
                self.__kill_bo()
            self.__final_kill_timer.start()
    def __force_kill_bo(self):
        """Timer callback: retry the kill if scrapy has not shut down."""
        self.__logger.log(logging.CRITICAL, 'Scrapy shut down not responding. Trying force kill')
        self.__kill_bo()
    def __kill_bo(self):
        # SIGINT is scrapy's clean-shutdown signal.
        subprocess.call(['kill', '-SIGINT', str(self.__bo_pid)])
def configure_logger():
    """Configure the root logger for the manager and return it."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.getLevelName(BO_MANAGER_SETTINGS.get('log_level')))
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s [BO MANAGER] [%(name)s] [%(levelname)s]: %(message)s'))
    root_logger.addHandler(handler)
    return root_logger
def main():
    # Wire up logging, then hand control to the manager's restart loop.
    logger = configure_logger()
    bo_manager = BoManager(logger, BO_MANAGER_SETTINGS.get('force_kill_delay_seconds', 10))
    bo_manager.run()
if __name__ == '__main__':
    main()
| {
"content_hash": "15bea9aa22c8f77ea7ab414fe6ee8856",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 118,
"avg_line_length": 33.330275229357795,
"alnum_prop": 0.628131021194605,
"repo_name": "udeyrishi/bo",
"id": "fa046fa3a18ba1ab3ea132cc26b74ed9764ab126",
"size": "3656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bo/bo.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35384"
}
],
"symlink_target": ""
} |
from meta import Album, Track, Artist
class MyMusicAlbum(Album):
    """An album entry inside a user's "My Music" collection."""
    type = "mymusic_album"
    fields = ['mymusic_track_count', 'added_at', 'title', 'total_tracks', 'duration', 'release_date', 'release_format', 'rating', 'popularity', 'streamable', 'artist_display_name']
    refs = ['artists', 'label', 'tracks', 'user']
    def __init__(self, **data):
        super(MyMusicAlbum, self).__init__(**data)
    @property
    def relative_path(self):
        # API path is scoped under the owning user.
        return "users/%s/mymusic/%s" % (self.user.identifier, self.identifier)
    def default_image_url(self, size):
        """Cover-art URL for this album at the given size."""
        return "http://im.api.beatsmusic.com/api/albums/%s/images/default?size=%s" % (self.identifier, size)
    def get_tracks(self, api, **kwargs):
        """Fetch the album's tracks via the authed collection endpoint."""
        return self._get_authed_collection(api, 'tracks', **kwargs)
class MyMusicTrack(Track):
    """A track entry inside a user's "My Music" collection."""
    type = "mymusic_track"
    fields = ['added_at', 'title', 'disc_number', 'parental_advisory', 'duration', 'track_position', 'popularity', 'streamable', 'artist_display_name']
    refs = ['artists', 'label', 'tracks', 'user']
    def __init__(self, **data):
        super(MyMusicTrack, self).__init__(**data)
    @property
    def relative_path(self):
        # API path is scoped under the owning user.
        return "users/%s/mymusic/%s" % (self.user.identifier, self.identifier)
    def default_image_url(self, size):
        """Artwork URL for this track at the given size."""
        return "http://im.api.beatsmusic.com/api/tracks/%s/images/default?size=%s" % (self.identifier, size)
class MyMusicArtist(Artist):
    """An artist entry inside a user's "My Music" collection."""
    type = "mymusic_artist"
    fields = ['name', 'popularity', 'total_singles', 'total_eps', 'total_lps', 'total_freeplays', 'total_compilations', 'streamable', 'total_albums', 'total_tracks']
    refs = ['artists', 'album', 'user']
    def __init__(self, **data):
        super(MyMusicArtist, self).__init__(**data)
    @property
    def relative_path(self):
        # API path is scoped under the owning user.
        return "users/%s/mymusic/%s" % (self.user.identifier, self.identifier)
    def default_image_url(self, size):
        """Artist image URL at the given size."""
        return "http://im.api.beatsmusic.com/api/artists/%s/images/default?size=%s" % (self.identifier, size)
    def get_albums(self, api, **kwargs):
        """Fetch the artist's albums via the authed collection endpoint."""
        return self._get_authed_collection(api, 'albums', **kwargs)
    def get_tracks(self, api, **kwargs):
        """Fetch the artist's tracks via the authed collection endpoint."""
        return self._get_authed_collection(api, 'tracks', **kwargs)
| {
"content_hash": "9e567a5a0f1be9e4a838a7230b4d37c1",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 180,
"avg_line_length": 41.41818181818182,
"alnum_prop": 0.6439859525899913,
"repo_name": "imsparsh/pybeats",
"id": "f1ca79aa68d7d90578a27cbdd8904a5e4845b90b",
"size": "2278",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pybeats/model/library.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "39296"
}
],
"symlink_target": ""
} |
import serial
import tornado.websocket
import tornado.ioloop
# Open serial device index 2 (COM3 on Windows); change this to whatever port
# your microcontroller is connected to.
ser = serial.Serial(2)
print(ser.portstr)  # echo which port was actually opened (naming quirks on Windows)
# Most recent line read from the serial port; rebound by main() on each read.
line = 0
# All currently-open websocket connections; handlers add/remove themselves.
listeners = []
class WebSocketHandler(tornado.websocket.WebSocketHandler):
    """Tracks every open websocket connection in the module-level
    ``listeners`` list so main() can broadcast serial data to all of them."""
    def open(self):
        # Register this connection for broadcasts.
        listeners.append(self)
    def on_close(self):
        # Drop the connection once the client goes away.
        listeners.remove(self)
# Single route: clients connect to ws://host:8888/websocket for the stream.
application = tornado.web.Application([
    (r"/websocket", WebSocketHandler),
])
def main():
    """Read one line from the serial port and broadcast it to every
    connected websocket client."""
    global line
    line = ser.readline()
    # Iterate over a snapshot: a client disconnecting mid-broadcast would
    # mutate ``listeners`` (via on_close) while the old index-based loop
    # walked it, skipping or crashing on entries.
    for listener in list(listeners):
        listener.write_message(str(line, 'utf-8'))
if __name__ == "__main__":
    application.listen(8888)
    # run_sync re-enters the IOLoop for every serial line read; each pass
    # blocks on ser.readline() inside main().
    while(1):
        tornado.ioloop.IOLoop.instance().run_sync(main)
| {
"content_hash": "62fbb73c4ae6e9ce3a5fd67186091a33",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 98,
"avg_line_length": 27.235294117647058,
"alnum_prop": 0.6857451403887689,
"repo_name": "Sulter/arduino-oxymeter",
"id": "dea17094b281f802ed9d64dc1055ea5459aff266",
"size": "926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3660"
},
{
"name": "JavaScript",
"bytes": "2260"
},
{
"name": "Python",
"bytes": "926"
}
],
"symlink_target": ""
} |
import inspect
import os
import textwrap
import types
import warnings
from .reference import Reference
from .shim import ModuleShim
class XTracebackFrame(object):
FILTER = ("__builtins__", "__all__", "__doc__", "__file__", "__name__",
"__package__", "__path__", "__loader__", "__cached__")
FUNCTION_EXCLUDE = ("GeneratorContextManager.__exit__",)
GLOBALS_PREFIX = "g:"
def __init__(self, xtb, frame, frame_info, tb_index):
self.xtb = xtb
self.frame = frame
self.frame_info = frame_info
self.tb_index = tb_index
(self.filename, self.lineno, self.function,
self.code_context, self.index) = self.frame_info
self.args, self.varargs, self.varkw = inspect.getargs(frame.f_code)
# keep track of what we've formatted in this frame
self.formatted_vars = {}
# we use a filtered copy of locals and globals
self.locals = self._filter(frame.f_locals)
self.globals = self._filter(frame.f_globals)
# filter globals
if self.xtb.options.globals_module_include is not None:
for key, value in self.globals.items():
assert key not in self.FILTER
if isinstance(value, types.ModuleType):
module = value.__name__
elif isinstance(value, types.InstanceType):
module = value.__class__.__module__
else:
module = getattr(value, "__module__", None)
if (module is not None \
and not module.startswith(
self.xtb.options.globals_module_include
)):
del self.globals[key]
# if path is a real path then try to shorten it
if os.path.exists(self.filename):
self.filename = self.xtb._format_filename(
os.path.abspath(self.filename)
)
# qualify method name with class name
if self.xtb.options.qualify_methods and self.args:
try:
cls = frame.f_locals[self.args[0]]
except KeyError: # pragma: no cover - defensive
# we're assuming that the first argument is in f_locals but
# it may not be in some cases so this is a defence, see
# https://github.com/ischium/xtraceback/issues/3 with further
# detail at http://www.sqlalchemy.org/trac/ticket/2317 and
# https://dev.entrouvert.org/issues/765
pass
except TypeError: # pragma: no cover - defensive
# if self.args[0] is a list it is not hashable - inspect.getargs
# may return nested lists for args
pass
else:
if not isinstance(cls, type):
cls = type(cls)
if hasattr(cls, self.function):
for base in inspect.getmro(cls):
if self.function in base.__dict__:
self.function = base.__name__ + "." + self.function
break
self._formatted = None
@property
def exclude(self):
return self.locals.get("__xtraceback_skip_frame__", False) \
or self.function in self.FUNCTION_EXCLUDE
def _filter(self, fdict):
try:
fdict = fdict.copy()
except NotImplementedError:
# user data types inheriting dict may not have implemented copy
pass
else:
to_remove = []
for key, value in fdict.items():
try:
if key in self.FILTER:
to_remove.append(key)
continue
except:
exc_info = sys.exc_info()
# the comparison failed for an unknown reason likely a
# custom __cmp__ that makes bad assumptions - swallow
try:
warnings.warn("Could not filter %r: %r" % (key, exc_info[1]))
except:
warnings.warn("Could not filter and can't say why: %s" % exc_info[1])
continue
else:
# replace some values with shim types
if isinstance(value, types.ModuleType):
value = ModuleShim.get_instance(value, self.xtb)
# replace objects from further up the stack with a Marker
oid = id(value)
stack_ref = self.xtb.seen.get(oid)
if stack_ref is not None:
marker = stack_ref.marker(self.xtb, self.tb_index, key)
if marker.tb_offset != 0:
value = marker
else:
self.xtb.seen[oid] = Reference(self.tb_index, key, value)
if isinstance(value, dict):
value = self._filter(value)
fdict[key] = value
for key in to_remove:
del fdict[key]
return fdict
def _format_variable(self, lines, key, value, indent=4, prefix=""):
if value is not self.formatted_vars.get(key):
self.formatted_vars[key] = value
if self.globals.get(key) is value:
prefix = self.GLOBALS_PREFIX + prefix
lines.append(self.xtb._format_variable(key, value, indent, prefix))
def _format_dict(self, odict, indent=4):
lines = []
for key in sorted(odict.keys()):
self._format_variable(lines, key, odict[key], indent)
return lines
def _format_frame(self):
lines = [' File "%s", line %d, in %s' % (self.filename, self.lineno,
self.function)]
# push frame args
if self.xtb.options.show_args:
for arg in self.args:
if isinstance(arg, list):
# TODO: inspect.getargs arg list may contain nested lists;
# skip it for now
continue
self._format_variable(lines, arg, self.locals.get(arg))
if self.varargs:
self._format_variable(lines, self.varargs,
self.locals.get(self.varargs), prefix="*")
if self.varkw:
self._format_variable(lines, self.varkw,
self.locals.get(self.varkw), prefix="**")
# push globals
if self.xtb.options.show_globals:
lines.extend(self._format_dict(self.globals))
# push context lines
if self.code_context is not None:
lineno = self.lineno - self.index
dedented = textwrap.dedent("".join(self.code_context))
for line in dedented.splitlines():
numbered_line = " %s" % "%*s %s" % (self.xtb.number_padding,
lineno,
line)
if lineno == self.lineno:
if self.xtb.options.context > 1:
# push the numbered line with a marker
dedented_line = numbered_line.lstrip()
marker_padding = len(numbered_line) \
- len(dedented_line) - 2
lines.append("%s> %s" % ("-" * marker_padding,
dedented_line))
else:
# push the line only
lines.append(" " + line)
# push locals below lined up with the start of code
if self.xtb.options.show_locals:
indent = self.xtb.number_padding + len(line) \
- len(line.lstrip()) + 5
lines.extend(self._format_dict(self.locals, indent))
else:
# push the numbered line
lines.append(numbered_line)
lineno += 1
elif self.xtb.options.show_locals:
# no context so we are execing
lines.extend(self._format_dict(self.locals))
return "\n".join(lines)
def __str__(self):
if self._formatted is None:
self._formatted = self._format_frame()
return self._formatted
| {
"content_hash": "b2861a42753ca51b382e46259cd9e503",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 93,
"avg_line_length": 39.72811059907834,
"alnum_prop": 0.4876464447279898,
"repo_name": "Hypernode/xtraceback",
"id": "8778f9cb7493077454db9de4f6b769d38b244043",
"size": "8621",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "xtraceback/xtracebackframe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "419674"
}
],
"symlink_target": ""
} |
def extract희노애락(item):
    """Feed parser for the series '희노애락' (WATTT-tagged releases only)."""
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Skip untagged previews and items without any chapter/volume numbering.
    if not (chp or vol) or 'preview' in title.lower():
        return None
    if 'WATTT' not in item['tags']:
        return False
    return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
| {
"content_hash": "5aca938a99683057df72e72ed15be1e6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 89,
"avg_line_length": 33.4,
"alnum_prop": 0.7035928143712575,
"repo_name": "fake-name/ReadableWebProxy",
"id": "b2e5819387076fd39c0f48060a56ca78031f7cb7",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extract희노애락.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth import authenticate
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.forms.models import modelformset_factory, inlineformset_factory
from slugify import slugify
import accounts.models
import accounts.utils
def check_email_domain_is_valid(email):
    """Raise ValidationError when *email*'s domain is blacklisted in
    settings; return True otherwise."""
    domain = email.rsplit("@", 1)[-1].strip()
    if domain in settings.BLACKLISTED_DOMAINS:
        raise forms.ValidationError(_("This email address is invalid."))
    return True
class LoginForm(forms.Form):
    """Email/password login form; authenticates during clean()."""
    email = forms.EmailField(
        label=_("Email"),
        # NOTE(review): 'Placeholder' is capitalised unlike the password
        # field's 'placeholder' -- likely a typo; confirm against templates.
        widget=forms.EmailInput(attrs={'autofocus': 'autofocus', 'id': 'login_email', 'Placeholder': 'Email'}))
    password = forms.CharField(
        label=_("Password"),
        widget=forms.PasswordInput(attrs={'id': 'login_password', 'placeholder': 'Password'}))
    def clean(self):
        """Look up an active user by email and authenticate; any failure
        (missing field, unknown user, bad password) raises one generic
        error so attackers cannot distinguish the cases."""
        try:
            email = self.cleaned_data['email']
            password = self.cleaned_data['password']
            # Case-insensitive match; inactive accounts are treated as absent.
            user = get_user_model().objects.get(email__iexact=email, is_active=True)
            try:
                self.authed_user = authenticate(
                    username=user.username,
                    password=password,
                )
            except ValueError:
                self.authed_user = None
            if self.authed_user:
                return self.cleaned_data
        except (get_user_model().DoesNotExist, KeyError):
            # KeyError: email/password failed field-level validation.
            pass
        raise forms.ValidationError("Your login details were incorrect. Please try again.")
    def get_user(self):
        # Only meaningful after a successful clean(); AttributeError otherwise.
        return self.authed_user
class RegistrationForm(forms.Form):
    """Sign-up form: name, email, password + confirmation, profile type.

    Validation rules:
      * first/last name must not be blank or whitespace-only;
      * email is normalized (lowercased, stripped, trailing dots removed),
        must not already belong to an account, and its domain must pass
        ``check_email_domain_is_valid``;
      * password and retype_password must be non-empty and must match.
    """
    first_name = forms.CharField(label=_('First name'), required=True, widget=forms.TextInput(
        attrs={
            'id': 'register_first_name',
            'class': 'validate black-text'
        }))
    last_name = forms.CharField(label=_('Last name'), required=True, widget=forms.TextInput(
        attrs={
            # BUG FIX: previously reused 'register_first_name', which
            # produced a duplicate DOM id in the rendered form.
            'id': 'register_last_name',
            'class': 'validate black-text'
        }))
    email = forms.EmailField(label=_("Email"), required=True, widget=forms.EmailInput(
        attrs={
            'class': 'validate black-text',
            'id': 'register_email',
        }))
    password = forms.CharField(label=_("Password"), required=True, widget=forms.PasswordInput(
        attrs={
            'id': 'register_password',
            'class': 'validate black-text',
        }))
    retype_password = forms.CharField(label=_("Repeat Password"), required=True, widget=forms.PasswordInput(
        attrs={
            'id': 'repeat_password',
            'class': 'validate black-text',
        }))
    profile_type = forms.ChoiceField(choices=accounts.models.Choices.Profiles.PROFILE_CHOICES)

    def clean(self):
        """Run cross-field validation (password match) after field cleaning."""
        cleaned_data = super(RegistrationForm, self).clean()
        self._validate_password()
        return cleaned_data

    def clean_first_name(self):
        """Reject blank or whitespace-only first names."""
        first_name = self.cleaned_data['first_name']
        # str.strip() never returns None, so a truthiness check covers both
        # the empty and whitespace-only cases.
        if not first_name.strip():
            raise forms.ValidationError(_("First name cannot be empty."))
        return first_name

    def clean_last_name(self):
        """Reject blank or whitespace-only last names."""
        # Local renamed from the copy-pasted 'first_name' for clarity.
        last_name = self.cleaned_data['last_name']
        if not last_name.strip():
            raise forms.ValidationError(_("Last name cannot be empty."))
        return last_name

    def clean_email(self, raise_on_duplicate=True):
        """Normalize the email, rejecting duplicates and blacklisted domains.

        Args:
            raise_on_duplicate: when False, an existing account without a
                usable password is not treated as a duplicate.
        """
        email = self.cleaned_data['email'].lower().strip().rstrip(".")
        try:
            self.user = get_user_model().objects.get(email__iexact=email)
        except get_user_model().DoesNotExist:
            pass
        except get_user_model().MultipleObjectsReturned:
            raise forms.ValidationError(_("There is already an account with that email address."))
        else:
            if raise_on_duplicate or self.user.has_usable_password():
                raise forms.ValidationError(_("There is already an account with that email address."))
        check_email_domain_is_valid(email)
        return email

    def clean_password(self):
        """Reject empty passwords."""
        password = self.cleaned_data['password'].strip()
        if not password:
            self.add_error('password', _("The password cannot be empty"))
        return password

    def clean_retype_password(self):
        """Reject an empty confirmation and eagerly re-check the match."""
        retype_password = self.cleaned_data['retype_password'].strip()
        if not retype_password:
            self.add_error('retype_password', _("The password cannot be empty"))
        self._validate_password()
        return retype_password

    def _validate_password(self):
        """Attach an error to 'password' when the two password fields differ."""
        password = self.cleaned_data.get('password')
        retype_password = self.cleaned_data.get('retype_password')
        if password != retype_password:
            self.add_error('password', _("The passwords do not match"))
| {
"content_hash": "c35b967bc6da09a4c27b946ed7122734",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 111,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.5992531446540881,
"repo_name": "tiagoarasilva/django-boilerplate",
"id": "5409a362074f30728b83a6e4a5153a6675302f9a",
"size": "5088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_name/accounts/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "109"
},
{
"name": "Dockerfile",
"bytes": "4856"
},
{
"name": "HTML",
"bytes": "10604"
},
{
"name": "Makefile",
"bytes": "1300"
},
{
"name": "Python",
"bytes": "89856"
}
],
"symlink_target": ""
} |
from msrest.pipeline import ClientRawResponse
from msrest.exceptions import HttpOperationError
from .. import models
class AvailabilitySets(object):
    """AvailabilitySets operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config

    def update(
            self, resource_group_name, avset, tags, custom_headers=None, raw=False, **operation_config):
        """Updates the tags for an availability set.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param avset: The name of the storage availability set.
        :type avset: str
        :param tags:
        :type tags: dict
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Re-wrap the flattened 'tags' argument into the request body model.
        body_model = models.AvailabilitySetUpdateParameters(tags=tags)

        # Build the request URL from the path template.
        url = self._client.format_url(
            '/parameterFlattening/{resourceGroupName}/{availabilitySetName}',
            resourceGroupName=self._serialize.url(
                "resource_group_name", resource_group_name, 'str'),
            availabilitySetName=self._serialize.url(
                "avset", avset, 'str', max_length=80))

        # This operation takes no query parameters.
        query_parameters = {}

        # Request headers: JSON content type plus caller-supplied extras.
        header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
        if custom_headers:
            header_parameters.update(custom_headers)

        # Serialize the body and issue the PATCH request.
        body_content = self._serialize.body(
            body_model, 'AvailabilitySetUpdateParameters')
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        # Anything other than 200 is an error for this operation.
        if response.status_code not in [200]:
            raise HttpOperationError(self._deserialize, response)

        if raw:
            return ClientRawResponse(None, response)
| {
"content_hash": "a58ebce8e630542a6f256ebf96ac95dd",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 104,
"avg_line_length": 36.51315789473684,
"alnum_prop": 0.654054054054054,
"repo_name": "sharadagarwal/autorest",
"id": "a45da1f5bfd72ae801e1606a0a92009ec326964d",
"size": "3249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/ParameterFlattening/autorestparameterflattening/operations/availability_sets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "12942"
},
{
"name": "C#",
"bytes": "11450022"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "4693719"
},
{
"name": "JavaScript",
"bytes": "4685941"
},
{
"name": "PowerShell",
"bytes": "29614"
},
{
"name": "Python",
"bytes": "2274436"
},
{
"name": "Ruby",
"bytes": "232193"
},
{
"name": "Shell",
"bytes": "423"
},
{
"name": "TypeScript",
"bytes": "179577"
}
],
"symlink_target": ""
} |
class NullClass:
    """A do-nothing stand-in object.

    Every attribute access returns a callable that accepts any positional
    and keyword arguments and returns None, so arbitrary method calls are
    silently swallowed.
    """

    def __getattr__(self, name):
        def _noop(*args, **kwargs):
            return None
        return _noop
# -------------------------------------------
# Example: switched writing to a logfile
# When verbose is off, log/err are NullClass instances whose write() calls
# are silently swallowed; when on, they are real file objects.
# NOTE(review): 'verbose' is assumed to be defined by the surrounding
# program before this snippet runs.
log = err = NullClass()
if verbose:
    # BUG FIX: open() defaults to read mode; 'w' is required so the
    # write() calls below do not fail on a read-only file object.
    log = open('/tmp/log', 'w')
    err = open('/tmp/err', 'w')
log.write('blabla')
err.write('blabla error')
#This obviously avoids the usual pollution from stuff like "if verbose: ".
#NullClass also accepts keyword arguments.
| {
"content_hash": "521522873b02e3eef3bc1961bc7fc6f2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 26,
"alnum_prop": 0.6225961538461539,
"repo_name": "ActiveState/code",
"id": "39c8b409dbfd1ef3456c5d6de2106e8a17a8bef4",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/119222_NullClass_accepts_every_functicall/recipe-119222.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
"""Test mempool re-org scenarios.
Test re-org scenarios with a mempool that contains transactions
that spend (directly or indirectly) coinbase transactions.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
    """Verify mempool handling of coinbase-derived spends across re-orgs."""

    def __init__(self):
        super().__init__()
        # Two nodes: node 1 mines, node 0 builds/broadcasts spends.
        self.num_nodes = 2
        self.setup_clean_chain = False

    # NOTE(review): nothing in this class assigns alert_filename;
    # presumably a leftover from the framework template — confirm.
    alert_filename = None  # Set by setup_network

    def setup_network(self):
        """Start two connected nodes with -checkmempool sanity checks on."""
        args = ["-checkmempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.nodes.append(start_node(1, self.options.tmpdir, args))
        connect_nodes(self.nodes[1], 0)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        """Exercise direct/indirect coinbase spends and a timelocked tx
        through block invalidation (re-org) scenarios."""
        # Start with a 200 block chain
        assert_equal(self.nodes[0].getblockcount(), 200)

        # Mine four blocks. After this, nodes[0] blocks
        # 101, 102, and 103 are spend-able.
        new_blocks = self.nodes[1].generate(4)
        self.sync_all()

        node0_address = self.nodes[0].getnewaddress()
        node1_address = self.nodes[1].getnewaddress()

        # Three scenarios for re-orging coinbase spends in the memory pool:
        # 1. Direct coinbase spend  :  spend_101
        # 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
        # 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
        # and make sure the mempool code behaves correctly.
        b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)
        spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)
        spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)

        # Create a transaction which is time-locked to two blocks in the future
        timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
        # Set the time lock: rewrite the input's nSequence so nLockTime is
        # enforced, then splice the lock height into the raw hex.
        timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
        timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
        timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
        # This will raise an exception because the timelock transaction is too immature to spend
        assert_raises_jsonrpc(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)

        # Broadcast and mine spend_102 and 103:
        spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
        spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
        self.nodes[0].generate(1)
        # Time-locked transaction is still too immature to spend
        assert_raises_jsonrpc(-26,'non-final', self.nodes[0].sendrawtransaction, timelock_tx)

        # Create 102_1 and 103_1:
        spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
        spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)

        # Broadcast and mine 103_1:
        spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
        last_block = self.nodes[0].generate(1)
        # Time-locked transaction can now be spent
        timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)

        # ... now put spend_101 and spend_102_1 in memory pools:
        spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
        spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)

        self.sync_all()

        assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})

        # Undo the last mined block.
        for node in self.nodes:
            node.invalidateblock(last_block[0])
        # Time-locked transaction is now too immature and has been removed from the mempool
        # spend_103_1 has been re-orged out of the chain and is back in the mempool
        assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})

        # Use invalidateblock to re-org back and make all those coinbase spends
        # immature/invalid:
        for node in self.nodes:
            node.invalidateblock(new_blocks[0])

        self.sync_all()

        # mempool should be empty.
        assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
    # Entry point when run directly by the rpc-tests harness.
    MempoolCoinbaseTest().main()
| {
"content_hash": "e8bef6c782458f0ef99fd17a11775dc4",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 122,
"avg_line_length": 46.15533980582524,
"alnum_prop": 0.6503996634413126,
"repo_name": "bitreserve/bitcoin",
"id": "812b54ffcb3234ebd1c0c91a301f6874c2c205a6",
"size": "4968",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/mempool_reorg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "781711"
},
{
"name": "C++",
"bytes": "4116849"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "3792"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "Makefile",
"bytes": "71875"
},
{
"name": "Objective-C",
"bytes": "2162"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "653389"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "26024"
}
],
"symlink_target": ""
} |
"""Migrates Feed-based sitelinks at Campaign level to use extension settings.
To learn more about extensionsettings, see:
https://developers.google.com/adwords/api/docs/guides/extension-settings.
To learn more about migrating Feed-based extensions to extension settings, see:
https://developers.google.com/adwords/api/docs/guides/migrate-to-extension-settings
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: FeedService.get, FeedMappingService.get, FeedItemService.get
Tags: CampaignExtensionSettingService.mutate, CampaignFeedService.get,
Tags: CampaignFeedService.mutate
Api: AdWordsOnly
"""
__author__ = 'Mark Saniscalchi'
from googleads import adwords
# The placeholder type for sitelinks. For the list of all supported placeholder
# types, see:
# https://developers.google.com/adwords/api/docs/appendix/placeholders
PLACEHOLDER_TYPE_SITELINKS = 1

# The placeholder field IDs for sitelinks. For the list of all supported
# placeholder types, see:
# https://developers.google.com/adwords/api/docs/appendix/placeholders
SITE_LINK_FIELDS = {
    'TEXT': 1,
    'URL': 2,
    'LINE2': 3,
    'LINE3': 4,
    'FINAL_URLS': 5,
    'FINAL_MOBILE_URLS': 6,
    'TRACKING_URL_TEMPLATE': 7
}

# Number of entries to request per page when paging through service results.
PAGE_SIZE = 500
def CreateExtensionSetting(client, feed_items, campaign_feed, feed_item_ids,
                           platform_restrictions=None):
  """Creates the extension setting for a list of Feed Items.

  Args:
    client: an AdWordsClient instance.
    feed_items: the list of all Feed Items.
    campaign_feed: the original Campaign Feed.
    feed_item_ids: the Ids of the feed items for which extension settings
      should be created.
    platform_restrictions: an optional Platform Restriction for the Feed
      items.
  """
  campaign_extension_setting_service = client.GetService(
      'CampaignExtensionSettingService', 'v201502')

  # BUG FIX: the sitelink dicts must be collected in a plain list. The
  # previous code wrapped each CreateSitelinkFeedItem result in a set
  # literal ({...}), which raises TypeError because dicts are unhashable.
  extension_feed_items = [
      CreateSitelinkFeedItem(feed_items, feed_item_id)
      for feed_item_id in feed_item_ids]

  extension_setting = {
      'extensions': extension_feed_items
  }

  if platform_restrictions:
    extension_setting['platformRestrictions'] = platform_restrictions

  campaign_extension_setting = {
      'campaignId': campaign_feed['campaignId'],
      'extensionType': 'SITELINK',
      'extensionSetting': extension_setting
  }

  operation = {
      'operand': campaign_extension_setting,
      'operator': 'ADD'
  }

  campaign_extension_setting_service.mutate([operation])
def CreateSitelinkFeedItem(feed_items, feed_item_id):
  """Creates a Sitelink Feed Item.

  Args:
    feed_items: a list of all Feed Items.
    feed_item_id: the Id of a specific Feed Item for which a Sitelink Feed
      Item should be created.

  Returns:
    The new Sitelink Feed Item.
  """
  site_link_from_feed = feed_items[feed_item_id]

  site_link_feed_item = {
      'sitelinkText': site_link_from_feed['text'],
      'sitelinkLine2': site_link_from_feed['line2'],
      'sitelinkLine3': site_link_from_feed['line3'],
  }

  # BUG FIX: 'scheduling' is only present on some feed items (see
  # GetSitelinksFromFeed), so copy it conditionally instead of raising
  # KeyError on items without scheduling data.
  if 'scheduling' in site_link_from_feed:
    site_link_feed_item['scheduling'] = site_link_from_feed['scheduling']

  if 'finalUrls' in site_link_from_feed and site_link_from_feed['finalUrls']:
    site_link_feed_item['sitelinkFinalUrls'] = {
        'urls': site_link_from_feed['finalUrls']
    }

    if 'finalMobileUrls' in site_link_from_feed:
      site_link_feed_item['sitelinkFinalMobileUrls'] = {
          'urls': site_link_from_feed['finalMobileUrls']
      }

    # BUG FIX: 'trackingUrlTemplate' may likewise be absent; guard the
    # access so migration does not crash on items without a template.
    if 'trackingUrlTemplate' in site_link_from_feed:
      site_link_feed_item['sitelinkTrackingUrlTemplate'] = (
          site_link_from_feed['trackingUrlTemplate'])
  else:
    site_link_feed_item['sitelinkUrl'] = site_link_from_feed['url']

  return site_link_feed_item
def DeleteCampaignFeed(client, campaign_feed):
  """Removes the given campaign feed via the CampaignFeedService.

  Args:
    client: an AdWordsClient instance.
    campaign_feed: the campaign feed to delete.
  """
  service = client.GetService('CampaignFeedService', 'v201502')
  # A single REMOVE operation detaches the feed from its campaign.
  service.mutate([{'operator': 'REMOVE', 'operand': campaign_feed}])
def DeleteOldFeedItems(client, feed_item_ids, feed):
  """Deletes the old feed items for which extension settings were created.

  Args:
    client: an AdWordsClient instance.
    feed_item_ids: a list of Feed Item Ids.
    feed: the Feed containing the given Feed Item Ids.
  """
  # Nothing to do when no items were migrated.
  if not feed_item_ids:
    return

  feed_item_service = client.GetService('FeedItemService', 'v201502')
  operations = []
  for item_id in feed_item_ids:
    operations.append({
        'operator': 'REMOVE',
        'operand': {
            'feedId': feed['id'],
            'feedItemId': item_id
        }
    })
  feed_item_service.mutate(operations)
def GetCampaignFeeds(client, feed, placeholder_type):
  """Get all enabled campaign feeds that reference the given Feed.

  Args:
    client: an AdWordsClient instance.
    feed: a Campaign Feed.
    placeholder_type: the Placeholder Type.

  Returns:
    A list of campaign feed entries.
  """
  campaign_feed_service = client.GetService('CampaignFeedService', 'v201502')

  selector = {
      'fields': ['CampaignId', 'MatchingFunction', 'PlaceholderTypes'],
      'predicates': [
          {
              'field': 'Status',
              'operator': 'EQUALS',
              'values': ['ENABLED']
          },
          {
              'field': 'FeedId',
              'operator': 'EQUALS',
              'values': [feed['id']]
          },
          {
              'field': 'PlaceholderTypes',
              'operator': 'CONTAINS_ANY',
              'values': [placeholder_type]
          }
      ],
      'paging': {
          'startIndex': 0,
          'numberResults': PAGE_SIZE
      }
  }

  campaign_feeds = []
  # Page through results until the start index passes the reported total.
  while True:
    page = campaign_feed_service.get(selector)
    if 'entries' in page:
      campaign_feeds.extend(page['entries'])
    selector['paging']['startIndex'] += PAGE_SIZE
    if selector['paging']['startIndex'] >= int(page['totalNumEntries']):
      break
  return campaign_feeds
def GetFeeds(client):
  """Returns a list of all enabled, user-created Feeds.

  Args:
    client: an AdWordsClient instance.

  Returns:
    A list containing all enabled Feeds.
  """
  feed_service = client.GetService('FeedService', 'v201502')

  selector = {
      'fields': ['Id', 'Name', 'Attributes'],
      'predicates': [
          {
              'field': 'Origin',
              'operator': 'EQUALS',
              'values': ['USER']
          },
          {
              'field': 'FeedStatus',
              'operator': 'EQUALS',
              'values': ['ENABLED']
          }
      ],
      'paging': {
          'startIndex': 0,
          'numberResults': PAGE_SIZE
      }
  }

  feeds = []
  # Page through results until the start index passes the reported total.
  while True:
    page = feed_service.get(selector)
    if 'entries' in page:
      feeds.extend(page['entries'])
    selector['paging']['startIndex'] += PAGE_SIZE
    if selector['paging']['startIndex'] >= int(page['totalNumEntries']):
      break
  return feeds
def GetFeedItems(client, feed):
  """Returns the enabled Feed Items for a given Feed.

  Args:
    client: an AdWordsClient instance.
    feed: the Feed we are retrieving Feed Items from.

  Returns:
    The Feed Items associated with the given Feed.
  """
  feed_item_service = client.GetService('FeedItemService', 'v201502')

  selector = {
      'fields': ['FeedItemId', 'AttributeValues', 'Scheduling'],
      'predicates': [
          {
              'field': 'Status',
              'operator': 'EQUALS',
              'values': ['ENABLED']
          },
          {
              'field': 'FeedId',
              'operator': 'EQUALS',
              'values': [feed['id']]
          }
      ],
      'paging': {
          'startIndex': 0,
          'numberResults': PAGE_SIZE
      }
  }

  feed_items = []
  # Page through results until the start index passes the reported total.
  while True:
    page = feed_item_service.get(selector)
    if 'entries' in page:
      feed_items.extend(page['entries'])
    selector['paging']['startIndex'] += PAGE_SIZE
    if selector['paging']['startIndex'] >= int(page['totalNumEntries']):
      break
  return feed_items
def GetFeedItemIdsForCampaign(campaign_feed):
  """Gets the Feed Item Ids used by a campaign through a given Campaign Feed.

  Args:
    campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.

  Returns:
    A set of Feed Item IDs referenced by the feed's matching function.
  """
  matching_function = campaign_feed['matchingFunction']
  ids = set()

  # Only IN(FEED_ITEM_ID, ...) matching functions carry explicit item IDs.
  if 'lhsOperand' not in matching_function:
    return ids
  lhs = matching_function['lhsOperand'][0]
  if lhs['xsi_type'] != 'RequestContextOperand':
    return ids

  if (lhs['contextType'] == 'FEED_ITEM_ID'
      and matching_function['operator'] == 'IN'):
    for operand in matching_function['rhsOperand']:
      if operand['xsi_type'] == 'ConstantOperand':
        ids.add(operand['longValue'])
  return ids
def GetFeedMapping(client, feed, placeholder_type):
  """Gets the Feed Mapping for a given Feed.

  Args:
    client: an AdWordsClient instance.
    feed: the Feed we are retrieving the Feed Mapping for.
    placeholder_type: the Placeholder Type we are looking for.

  Returns:
    A dictionary mapping each feed attribute ID to a list of field IDs.
  """
  feed_mapping_service = client.GetService('FeedMappingService', 'v201502')

  selector = {
      'fields': ['FeedMappingId', 'AttributeFieldMappings'],
      'predicates': [
          {
              'field': 'FeedId',
              'operator': 'EQUALS',
              'values': [feed['id']]
          },
          {
              'field': 'PlaceholderType',
              'operator': 'EQUALS',
              'values': [placeholder_type]
          }
      ],
      'paging': {
          'startIndex': 0,
          'numberResults': PAGE_SIZE
      }
  }

  attribute_mappings = {}
  # Page through results until the start index passes the reported total.
  while True:
    page = feed_mapping_service.get(selector)
    if 'entries' in page:
      # A feed attribute is normally mapped to a single field, but multiple
      # mappings are allowed, so every attribute ID keys a list of field IDs.
      for feed_mapping in page['entries']:
        for field_mapping in feed_mapping['attributeFieldMappings']:
          attribute_mappings.setdefault(
              field_mapping['feedAttributeId'], []).append(
                  field_mapping['fieldId'])
    selector['paging']['startIndex'] += PAGE_SIZE
    if selector['paging']['startIndex'] >= int(page['totalNumEntries']):
      break
  return attribute_mappings
def GetPlatformRestrictions(campaign_feed):
  """Get the Platform Restrictions for a given Campaign Feed.

  Args:
    campaign_feed: the Campaign Feed we are retrieving Platform Restrictions
      for.

  Returns:
    The ExtensionSettingPlatform value for the feed, or None if the feed is
    not platform-restricted.
  """
  platform_restrictions = None

  if campaign_feed['matchingFunction']['operator'] == 'AND':
    for argument in campaign_feed['matchingFunction']['lhsOperand']:
      # Check if matchingFunction is EQUALS(CONTEXT.DEVICE, 'Mobile')
      if argument['value']['operator'] == 'EQUALS':
        request_context_operand = argument['value']['lhsOperand'][0]
        # BUG FIX: the operand is an object whose 'contextType' field must
        # be inspected; comparing the whole operand to the string
        # 'DEVICE_PLATFORM' was always False, so restrictions were never
        # detected.
        if (request_context_operand and
            request_context_operand['contextType'] == 'DEVICE_PLATFORM'):
          # This needs to be capitalized for ExtensionSettingPlatform.
          platform_restrictions = argument['value']['rhsOperand'][0].upper()

  return platform_restrictions
def GetSitelinksFromFeed(client, feed):
  """Gets the sitelinks from a feed.

  Args:
    client: an AdWordsClient instance.
    feed: the feed used to retrieve sitelinks.

  Returns:
    A dictionary mapping the feed item ID to SiteLinkFromFeed.
  """
  # Retrieve the feed's attribute mapping (feed attribute ID -> field IDs).
  feed_mappings = GetFeedMapping(client, feed, PLACEHOLDER_TYPE_SITELINKS)

  feed_items = {}
  for feed_item in GetFeedItems(client, feed):
    site_link_from_feed = {}
    for attribute_value in feed_item['attributeValues']:
      if attribute_value['feedAttributeId'] in feed_mappings:
        # BUG FIX: look up the field IDs mapped to THIS attribute's ID; the
        # previous code indexed feed_mappings with the literal string
        # 'attributeValue', which raised KeyError on the first item.
        for field_id in feed_mappings[attribute_value['feedAttributeId']]:
          if field_id == SITE_LINK_FIELDS['TEXT']:
            site_link_from_feed['text'] = attribute_value['stringValue']
          elif field_id == SITE_LINK_FIELDS['URL']:
            site_link_from_feed['url'] = attribute_value['stringValue']
          elif field_id == SITE_LINK_FIELDS['FINAL_URLS']:
            site_link_from_feed['finalUrls'] = attribute_value['stringValues']
          elif field_id == SITE_LINK_FIELDS['FINAL_MOBILE_URLS']:
            site_link_from_feed['finalMobileUrls'] = attribute_value[
                'stringValues']
          elif field_id == SITE_LINK_FIELDS['TRACKING_URL_TEMPLATE']:
            site_link_from_feed['trackingUrlTemplate'] = attribute_value[
                'stringValue']
          elif field_id == SITE_LINK_FIELDS['LINE2']:
            site_link_from_feed['line2'] = attribute_value['stringValue']
          elif field_id == SITE_LINK_FIELDS['LINE3']:
            site_link_from_feed['line3'] = attribute_value['stringValue']
          else:
            # Parenthesized so this works under both Python 2 and 3.
            print('No applicable Site Link Field found for Id: %s' % field_id)

    if 'scheduling' in feed_item:
      site_link_from_feed['scheduling'] = feed_item['scheduling']

    feed_items[feed_item['feedItemId']] = site_link_from_feed

  return feed_items
def main(client):
  """Migrates every sitelink campaign feed to extension settings.

  Args:
    client: an AdWordsClient instance.
  """
  # Get all of the feeds for the current user.
  feeds = GetFeeds(client)

  for feed in feeds:
    # Retrieve all the sitelinks from the current feed.
    feed_items = GetSitelinksFromFeed(client, feed)
    # Get all the instances where a sitelink from this feed has been added
    # to a campaign.
    campaign_feeds = GetCampaignFeeds(client, feed, PLACEHOLDER_TYPE_SITELINKS)
    all_feed_items_to_delete = []

    for campaign_feed in campaign_feeds:
      # Retrieve the sitelinks that have been associated with this Campaign.
      feed_item_ids = GetFeedItemIdsForCampaign(campaign_feed)

      # BUG FIX: feed_item_ids is a set; comparing it to 0 was always False,
      # so empty campaign feeds were never skipped. The message also used
      # keys ('campaign_id'/'feed_id') that do not match the camelCase keys
      # used elsewhere in this module.
      if not feed_item_ids:
        print ('Migration skipped for campaign feed with campaign ID %d '
               'and feed ID %d because no mapped feed item IDs were found in '
               'the campaign feed\'s matching function.'
               % (campaign_feed['campaignId'], feed['id']))
        continue

      platform_restrictions = GetPlatformRestrictions(campaign_feed)

      # Delete the campaign feed that associates the sitelinks from the feed
      # to the Campaign.
      DeleteCampaignFeed(client, campaign_feed)

      # Create extension settings instead of sitelinks.
      CreateExtensionSetting(client, feed_items, campaign_feed, feed_item_ids,
                             platform_restrictions)

      # Mark the sitelinks from the feed for deletion.
      all_feed_items_to_delete.extend(feed_item_ids)

    # Delete all the sitelinks from the feed.
    DeleteOldFeedItems(client, all_feed_items_to_delete, feed)
if __name__ == '__main__':
  # Initialize client object. LoadFromStorage reads credentials from the
  # googleads.yaml file described in the module docstring.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()

  main(adwords_client)
| {
"content_hash": "0f5fc0f6c9f4647e2007d6bb479563d4",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 83,
"avg_line_length": 30.348927875243664,
"alnum_prop": 0.6417239385959278,
"repo_name": "ya7lelkom/googleads-python-lib",
"id": "0b1345495975fa336f512fc42bef992998a6b58a",
"size": "16187",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adwords/v201502/migration/migrate_to_extension_settings.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2535232"
}
],
"symlink_target": ""
} |
# MyRobotLab script: 'runtime' is injected into the script's scope by the
# framework — presumably the MyRobotLab service runtime; confirm against
# the Xmpp service docs.
xmpp = runtime.start("xmpp","Xmpp")
# adds the python service as a listener for messages
xmpp.addListener("python","publishMessage")
# there is a big list of different xmpp/jabber servers out there
# but we will connect to the big one - since that is where our robots account is
xmpp.connect("talk.google.com", 5222, "robot01@myrobotlab.org", "xxxxxxx")
# gets list of all the robots friends (prints the roster; Python 2 syntax)
print xmpp.getRoster()
# set your online status
xmpp.setStatus(True, "online all the time")
# add auditors you want this robot to chat with
# auditors can issue commands and will be notified of
# commands being sent by others and what those commands return
xmpp.addAuditor("Joe Smith")
xmpp.addAuditor("Jane Smith")
# send a message
xmpp.sendMessage("hello this is robot01 - the current heatbed temperature is 40 degrees celcius", "Joe Smith")
def publishMessage():
    # Callback fired by the Xmpp service for each incoming message.
    # msg_xmpp_publishMessage is injected into this script's global scope by
    # the framework — presumably the latest message event; data[0] holds the
    # message object (TODO confirm against the Xmpp service implementation).
    msg = msg_xmpp_publishMessage.data[0]
    print msg.getFrom(), " says " , msg.getBody()
| {
"content_hash": "4a5b22e4769ddc96a2daa770edc41e70",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 110,
"avg_line_length": 35.25925925925926,
"alnum_prop": 0.7531512605042017,
"repo_name": "MyRobotLab/myrobotlab",
"id": "b25172352e739ebbb439ebb2ff5a937306c4c1ec",
"size": "1093",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/main/resources/resource/Xmpp/Xmpp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1542"
},
{
"name": "C",
"bytes": "6677"
},
{
"name": "C++",
"bytes": "274868"
},
{
"name": "CSS",
"bytes": "83744"
},
{
"name": "GLSL",
"bytes": "757"
},
{
"name": "HTML",
"bytes": "374401"
},
{
"name": "Java",
"bytes": "7100082"
},
{
"name": "JavaScript",
"bytes": "1536187"
},
{
"name": "Propeller Spin",
"bytes": "14406"
},
{
"name": "Python",
"bytes": "191671"
},
{
"name": "Shell",
"bytes": "3547"
}
],
"symlink_target": ""
} |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('search_jobs')
@click.argument("tool_id", type=str)
@click.argument("inputs", type=str)
@click.option(
    "--state",
    help="only return jobs in this state",
    type=str
)
@pass_context
@custom_exception
@json_output
def cli(ctx, tool_id, inputs, state=""):
    """Return jobs matching input parameters.

Output:

    Summary information for each matching job

    This method is designed to scan the list of previously run jobs and find
    records of jobs with identical input parameters and datasets. This can
    be used to minimize the amount of repeated work by simply recycling the
    old results.

    .. versionchanged:: 0.16.0
      Replaced the ``job_info`` parameter with separate ``tool_id``,
      ``inputs`` and ``state``.

    .. note::
      This method is only supported by Galaxy 18.01 or later.
    """
    # Delegate to the jobs client on the Galaxy instance bound by
    # @pass_context; @json_output serializes the returned summaries and
    # @custom_exception converts API errors into CLI-friendly messages.
    return ctx.gi.jobs.search_jobs(tool_id, inputs, state=state)
| {
"content_hash": "e1289d616f8195119af6c0578f18fbfd",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 80,
"avg_line_length": 29.63888888888889,
"alnum_prop": 0.6776007497656982,
"repo_name": "galaxy-iuc/parsec",
"id": "55c0a14fc3d3c8fd5c777b55eba8138e288f7f1d",
"size": "1067",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parsec/commands/jobs/search_jobs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "194"
},
{
"name": "Python",
"bytes": "187279"
}
],
"symlink_target": ""
} |
__author__ = 'frankhe'
import numpy as np
import time
import theano
floatX = theano.config.floatX
class DataSet(object):
    """Ring-buffer replay memory of game frames for DQN-style training.

    Stores up to ``max_steps`` frames together with per-step action, reward,
    terminal flag, discounted return and episode start/terminal bookkeeping.
    The buffer is circular: ``bottom``/``top``/``size`` delimit the live
    window and every read goes through ``numpy.take(..., mode='wrap')``.
    Batch output arrays are preallocated once and refilled in place by the
    ``random_*_batch`` sampling methods.
    """
    def __init__(self, width, height, rng, max_steps=1000000, phi_length=4, discount=0.99, batch_size=32,
                 transitions_len=4):
        # width/height: frame size in pixels; rng: numpy RandomState used
        # for all sampling.  phi_length: number of stacked consecutive
        # frames that form one state ("phi").  transitions_len: how many
        # forward/backward neighbour transitions are produced per sample.
        self.width = width
        self.height = height
        self.max_steps = max_steps
        self.phi_length = phi_length
        self.rng = rng
        self.discount = discount
        # Precomputed discount**k lookup; indexed by transition distance.
        # Assumes distances stay below 30 -- TODO confirm vs transition_range.
        self.discount_table = np.power(self.discount, np.arange(30))
        # Per-step circular storage, one row per recorded step.
        self.imgs = np.zeros((max_steps, height, width), dtype='uint8')
        self.actions = np.zeros(max_steps, dtype='int32')
        self.rewards = np.zeros(max_steps, dtype=floatX)
        self.return_value = np.zeros(max_steps, dtype=floatX)
        self.terminal = np.zeros(max_steps, dtype='bool')
        self.terminal_index = np.zeros(max_steps, dtype='int32')
        self.start_index = np.zeros(max_steps, dtype='int32')
        # Circular-buffer cursors: bottom = oldest entry, top = next write slot.
        self.bottom = 0
        self.top = 0
        self.size = 0
        # Preallocated batch output buffers, reused by every sampled batch.
        self.center_imgs = np.zeros((batch_size,
                                     self.phi_length,
                                     self.height,
                                     self.width),
                                    dtype='uint8')
        self.forward_imgs = np.zeros((batch_size,
                                      transitions_len,
                                      self.phi_length,
                                      self.height,
                                      self.width),
                                     dtype='uint8')
        self.backward_imgs = np.zeros((batch_size,
                                       transitions_len,
                                       self.phi_length,
                                       self.height,
                                       self.width),
                                      dtype='uint8')
        self.center_positions = np.zeros((batch_size, 1), dtype='int32')
        self.forward_positions = np.zeros((batch_size, transitions_len), dtype='int32')
        self.backward_positions = np.zeros((batch_size, transitions_len), dtype='int32')
        self.center_actions = np.zeros((batch_size, 1), dtype='int32')
        self.backward_actions = np.zeros((batch_size, transitions_len), dtype='int32')
        self.center_terminals = np.zeros((batch_size, 1), dtype='bool')
        self.center_rewards = np.zeros((batch_size, 1), dtype=floatX)
        self.center_return_values = np.zeros((batch_size, 1), dtype=floatX)
        self.forward_return_values = np.zeros((batch_size, transitions_len), dtype=floatX)
        self.backward_return_values = np.zeros((batch_size, transitions_len), dtype=floatX)
        self.forward_discounts = np.zeros((batch_size, transitions_len), dtype=floatX)
        self.backward_discounts = np.zeros((batch_size, transitions_len), dtype=floatX)
    def add_sample(self, img, action, reward, terminal, return_value=0.0, start_index=-1):
        """Append one step to the buffer, evicting the oldest when full.

        ``terminal_index`` is initialised to -1 (meaning "episode end not
        yet known"); presumably some external code patches it once the
        episode finishes -- no writer is visible in this class.
        """
        self.imgs[self.top] = img
        self.actions[self.top] = action
        self.rewards[self.top] = reward
        self.terminal[self.top] = terminal
        self.return_value[self.top] = return_value
        self.start_index[self.top] = start_index
        self.terminal_index[self.top] = -1
        if self.size == self.max_steps:
            # Buffer full: advance bottom so the oldest entry is overwritten.
            self.bottom = (self.bottom + 1) % self.max_steps
        else:
            self.size += 1
        self.top = (self.top + 1) % self.max_steps
    def __len__(self):
        """Return the number of samples currently stored."""
        return self.size
    def last_phi(self):
        """Return the most recent phi (sequence of image frames)."""
        indexes = np.arange(self.top - self.phi_length, self.top)
        return self.imgs.take(indexes, axis=0, mode='wrap')
    def phi(self, img):
        """Return a phi (sequence of image frames), using the last phi_length -
        1, plus img.
        """
        indexes = np.arange(self.top - self.phi_length + 1, self.top)
        phi = np.empty((self.phi_length, self.height, self.width), dtype='uint8')
        phi[0:self.phi_length - 1] = self.imgs.take(indexes,
                                                    axis=0,
                                                    mode='wrap')
        phi[-1] = img
        return phi
    def random_close_transitions_batch(self, batch_size, transitions_len):
        """Fill the preallocated batch buffers with ``batch_size`` samples.

        For every sampled centre step, the ``transitions_len`` immediately
        adjacent forward and backward steps are collected, clipped to the
        episode's start/terminal boundaries.  Results are written in place
        into the ``center_* / forward_* / backward_*`` arrays.
        """
        transition_range = transitions_len
        count = 0
        while count < batch_size:
            # Draw a random phi-length window; rejection-sample until valid.
            index = self.rng.randint(self.bottom,
                                     self.bottom + self.size - self.phi_length)
            all_indices = np.arange(index, index + self.phi_length)
            center_index = index + self.phi_length - 1
            """
            frame0 frame1 frame2 frame3
            index                center_index = index+phi-1
            """
            # Reject windows that straddle an episode boundary.
            if np.any(self.terminal.take(all_indices[0:-1], mode='wrap')):
                continue
            # Reject steps whose episode end has not been recorded yet.
            if np.any(self.terminal_index.take(all_indices, mode='wrap') == -1):
                continue
            terminal_index = self.terminal_index.take(center_index, mode='wrap')
            start_index = self.start_index.take(center_index, mode='wrap')
            self.center_positions[count] = center_index
            self.center_terminals[count] = self.terminal.take(center_index, mode='wrap')
            self.center_rewards[count] = self.rewards.take(center_index, mode='wrap')
            """ get forward transitions """
            # Unwrap indices that crossed the end of the circular buffer.
            if terminal_index < center_index:
                terminal_index += self.size
            max_forward_index = max(min(center_index + transition_range, terminal_index), center_index+1) + 1
            # Default every slot to center_index + 1; presumably padding for
            # samples close to the episode end -- TODO confirm consumer side.
            self.forward_positions[count] = center_index + 1
            for i, j in zip(range(transitions_len), range(center_index + 1, max_forward_index)):
                self.forward_positions[count, i] = j
            """ get backward transitions """
            if start_index + self.size < center_index:
                start_index += self.size
            min_backward_index = max(center_index - transition_range, start_index+self.phi_length-1)
            self.backward_positions[count] = center_index + 1
            for i, j in zip(range(transitions_len), range(center_index - 1, min_backward_index - 1, -1)):
                self.backward_positions[count, i] = j
                # Fall back to the padding position if the step's episode
                # end is still unknown.
                if self.terminal_index.take(j, mode='wrap') == -1:
                    self.backward_positions[count, i] = center_index + 1
            self.center_imgs[count] = self.imgs.take(all_indices, axis=0, mode='wrap')
            # xrange: this module targets Python 2.
            for j in xrange(transitions_len):
                forward_index = self.forward_positions[count, j]
                backward_index = self.backward_positions[count, j]
                self.forward_imgs[count, j] = self.imgs.take(
                    np.arange(forward_index - self.phi_length + 1, forward_index + 1), axis=0, mode='wrap')
                self.backward_imgs[count, j] = self.imgs.take(
                    np.arange(backward_index - self.phi_length + 1, backward_index + 1), axis=0, mode='wrap')
            self.center_actions[count] = self.actions.take(center_index, mode='wrap')
            self.backward_actions[count] = self.actions.take(self.backward_positions[count], mode='wrap')
            self.center_return_values[count] = self.return_value.take(center_index, mode='wrap')
            self.forward_return_values[count] = self.return_value.take(self.forward_positions[count], mode='wrap')
            self.backward_return_values[count] = self.return_value.take(self.backward_positions[count], mode='wrap')
            # discount**distance for each neighbour transition.
            distance = np.absolute(self.forward_positions[count] - center_index)
            self.forward_discounts[count] = self.discount_table[distance]
            distance = np.absolute(self.backward_positions[count] - center_index)
            self.backward_discounts[count] = self.discount_table[distance]
            # print self.backward_positions[count][::-1], self.center_positions[count], self.forward_positions[count]
            # print 'start=', start_index, 'center=', self.center_positions[count], 'end=', terminal_index
            # raw_input()
            count += 1
    def random_transitions_batch(self, batch_size, transitions_len, transition_range=10):
        """Fill the batch buffers with samples whose forward/backward
        neighbours are drawn *at random* within ``transition_range`` steps,
        clipped to the sampled step's episode boundaries (compare
        ``random_close_transitions_batch``, which takes consecutive steps).
        """
        count = 0
        while count < batch_size:
            index = self.rng.randint(self.bottom,
                                     self.bottom + self.size - self.phi_length)
            all_indices = np.arange(index, index + self.phi_length)
            center_index = index + self.phi_length - 1
            """
            frame0 frame1 frame2 frame3
            index                center_index = index+phi-1
            """
            # Rejection-sample: no terminal inside the phi window, and the
            # episode end must already be recorded for every step.
            if np.any(self.terminal.take(all_indices[0:-1], mode='wrap')):
                continue
            if np.any(self.terminal_index.take(all_indices, mode='wrap') == -1):
                continue
            terminal_index = self.terminal_index.take(center_index, mode='wrap')
            start_index = self.start_index.take(center_index, mode='wrap')
            self.center_positions[count] = center_index
            self.center_terminals[count] = self.terminal.take(center_index, mode='wrap')
            self.center_rewards[count] = self.rewards.take(center_index, mode='wrap')
            """ get forward transitions """
            if terminal_index < center_index:
                terminal_index += self.size
            max_forward_index = max(min(center_index + transition_range, terminal_index), center_index+1) + 1
            # First forward slot is always the direct successor; the rest
            # are random within (center, max_forward_index).
            self.forward_positions[count, 0] = center_index+1
            if center_index + 2 >= max_forward_index:
                self.forward_positions[count, 1:] = center_index + 1
            else:
                self.forward_positions[count, 1:] = self.rng.randint(center_index+2, max_forward_index, transitions_len-1)
            """ get backward transitions """
            if start_index + self.size < center_index:
                start_index += self.size
            min_backward_index = max(center_index - transition_range, start_index+self.phi_length-1)
            if min_backward_index >= center_index:
                # No usable history: pad with the forward neighbour position.
                self.backward_positions[count] = [center_index + 1] * transitions_len
            else:
                # Avoid sampling across the write cursor (stale/overwritten
                # region of the ring buffer).
                if center_index > self.top > min_backward_index:
                    min_backward_index = self.top
                self.backward_positions[count] = self.rng.randint(min_backward_index, center_index, transitions_len)
            self.center_imgs[count] = self.imgs.take(all_indices, axis=0, mode='wrap')
            for j in xrange(transitions_len):
                forward_index = self.forward_positions[count, j]
                backward_index = self.backward_positions[count, j]
                self.forward_imgs[count, j] = self.imgs.take(
                    np.arange(forward_index - self.phi_length + 1, forward_index + 1), axis=0, mode='wrap')
                self.backward_imgs[count, j] = self.imgs.take(
                    np.arange(backward_index - self.phi_length + 1, backward_index + 1), axis=0, mode='wrap')
            self.center_actions[count] = self.actions.take(center_index, mode='wrap')
            self.backward_actions[count] = self.actions.take(self.backward_positions[count], mode='wrap')
            self.center_return_values[count] = self.return_value.take(center_index, mode='wrap')
            self.forward_return_values[count] = self.return_value.take(self.forward_positions[count], mode='wrap')
            self.backward_return_values[count] = self.return_value.take(self.backward_positions[count], mode='wrap')
            distance = np.absolute(self.forward_positions[count] - center_index)
            self.forward_discounts[count] = self.discount_table[distance]
            distance = np.absolute(self.backward_positions[count] - center_index)
            self.backward_discounts[count] = self.discount_table[distance]
            # print self.backward_positions[count][::-1], self.center_positions[count], self.forward_positions[count]
            # print 'start=', start_index, 'center=', self.center_positions[count], 'end=', terminal_index
            # raw_input()
            count += 1
    def random_imgs(self, size):
        """Return ``size`` random sequences of ``phi_length + 1`` consecutive
        frames, rejecting windows containing an episode boundary.
        """
        imgs = np.zeros((size,
                         self.phi_length + 1,
                         self.height,
                         self.width),
                        dtype='uint8')
        count = 0
        while count < size:
            index = self.rng.randint(self.bottom,
                                     self.bottom + self.size - self.phi_length)
            all_indices = np.arange(index, index + self.phi_length + 1)
            # NOTE(review): end_index is computed but never used.
            end_index = index + self.phi_length - 1
            if np.any(self.terminal.take(all_indices[0:-2], mode='wrap')):
                continue
            imgs[count] = self.imgs.take(all_indices, axis=0, mode='wrap')
            count += 1
        return imgs
| {
"content_hash": "605bf77b4e36e4a281cc4d5b5f901208",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 122,
"avg_line_length": 51.948,
"alnum_prop": 0.5654115654115655,
"repo_name": "ShibiHe/Q-Optimality-Tightening",
"id": "75092adab91a28c84d3e849d640dec74c78edce5",
"size": "13010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/ale_data_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83356"
}
],
"symlink_target": ""
} |
from helpers import functions
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack
from werkzeug import check_password_hash, generate_password_hash
def login():
    """Log the user in from the posted username/password form."""
    if g.user:
        # Already authenticated: nothing to do here.
        return redirect(functions.url_for('/'))
    error = None
    if request.method == 'POST':
        username = request.form['username']
        user = functions.query_db('''select * from user where
            username = ?''', [username], one=True)
        if user is None:
            error = 'Invalid username'
        elif not check_password_hash(user['pw_hash'], request.form['password']):
            error = 'Invalid password'
        else:
            # Credentials verified: establish the session and bounce home.
            flash('You were logged in')
            session['user_id'] = user['user_id']
            return redirect(functions.url_for('/'))
    return render_template('login.html', error=error)
def register():
    """Register a new user account from the signup form."""
    if g.user:
        # Logged-in users cannot register again.
        return redirect(functions.url_for('/'))
    error = None
    if request.method == 'POST':
        form = request.form
        # Validation checks run in order; the first failure wins.
        if not form['username']:
            error = 'You have to enter a username'
        elif not form['email'] or '@' not in form['email']:
            error = 'You have to enter a valid email address'
        elif not form['password']:
            error = 'You have to enter a password'
        elif form['password'] != form['password2']:
            error = 'The two passwords do not match'
        elif functions.get_user_id(form['username']) is not None:
            error = 'The username is already taken'
        else:
            # All checks passed: persist the account (hashed password only).
            db = functions.get_db()
            db.execute('''insert into user (username, email, pw_hash) values (?, ?, ?)''',
                       [form['username'], form['email'],
                        generate_password_hash(form['password'])])
            db.commit()
            flash('You were successfully registered and can login now')
            return redirect(functions.url_for('login'))
    return render_template('register.html', error=error)
def logout():
    """Log the current user out and return to the public timeline."""
    session.pop('user_id', None)
    flash('You were logged out')
    return redirect(functions.url_for('/public'))
| {
"content_hash": "22b0267d3d72baf79e16a0caf3f905ec",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 90,
"avg_line_length": 40.857142857142854,
"alnum_prop": 0.5895979020979021,
"repo_name": "RoseyG/CSITWIT",
"id": "c079546a3fa62f918ed95768cfb14c18012a7bcc",
"size": "2288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3080"
},
{
"name": "HTML",
"bytes": "4035"
},
{
"name": "Python",
"bytes": "11511"
}
],
"symlink_target": ""
} |
import unittest
import imath
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class CustomOptionsTest( GafferSceneTest.SceneTestCase ) :
	"""Unit tests for the GafferScene.CustomOptions node."""
	def test( self ) :
		"""Scene data passes through untouched while added options land in the globals."""
		p = GafferScene.Plane()
		options = GafferScene.CustomOptions()
		options["in"].setInput( p["out"] )
		# check that the scene hierarchy is passed through
		self.assertEqual( options["out"].object( "/" ), IECore.NullObject() )
		self.assertEqual( options["out"].transform( "/" ), imath.M44f() )
		self.assertEqual( options["out"].bound( "/" ), imath.Box3f( imath.V3f( -0.5, -0.5, 0 ), imath.V3f( 0.5, 0.5, 0 ) ) )
		self.assertEqual( options["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "plane" ] ) )
		self.assertEqual( options["out"].object( "/plane" ), IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -0.5 ), imath.V2f( 0.5 ) ) ) )
		self.assertEqual( options["out"].transform( "/plane" ), imath.M44f() )
		self.assertEqual( options["out"].bound( "/plane" ), imath.Box3f( imath.V3f( -0.5, -0.5, 0 ), imath.V3f( 0.5, 0.5, 0 ) ) )
		self.assertEqual( options["out"].childNames( "/plane" ), IECore.InternedStringVectorData() )
		# check that we can make options
		options["options"].addChild( Gaffer.NameValuePlug( "test", IECore.IntData( 10 ) ) )
		options["options"].addChild( Gaffer.NameValuePlug( "test2", IECore.StringData( "10" ) ) )
		g = options["out"]["globals"].getValue()
		self.assertEqual( len( g ), 2 )
		self.assertEqual( g["option:test"], IECore.IntData( 10 ) )
		self.assertEqual( g["option:test2"], IECore.StringData( "10" ) )
	def testSerialisation( self ) :
		"""Dynamic option plugs survive a serialise/execute round trip."""
		s = Gaffer.ScriptNode()
		s["optionsNode"] = GafferScene.CustomOptions()
		s["optionsNode"]["options"].addChild( Gaffer.NameValuePlug( "test", IECore.IntData( 10 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
		s["optionsNode"]["options"].addChild( Gaffer.NameValuePlug( "test2", IECore.StringData( "10" ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
		ss = s.serialise()
		s2 = Gaffer.ScriptNode()
		s2.execute( ss )
		g = s2["optionsNode"]["out"]["globals"].getValue()
		self.assertEqual( len( g ), 2 )
		self.assertEqual( g["option:test"], IECore.IntData( 10 ) )
		self.assertEqual( g["option:test2"], IECore.StringData( "10" ) )
		# Serialisation must not create duplicate children.
		self.assertTrue( "options1" not in s2["optionsNode"] )
	def testHashPassThrough( self ) :
		# The hash of everything except the globals should be
		# identical to the input, so that they share cache entries.
		p = GafferScene.Plane()
		options = GafferScene.CustomOptions()
		options["in"].setInput( p["out"] )
		options["options"].addChild( Gaffer.NameValuePlug( "test", IECore.IntData( 10 ) ) )
		self.assertSceneHashesEqual( p["out"], options["out"], checks = self.allSceneChecks - { "globals" } )
	def testDisabled( self ) :
		"""Disabling the node makes it a perfect pass-through, globals included."""
		p = GafferScene.Plane()
		options = GafferScene.CustomOptions()
		options["in"].setInput( p["out"] )
		options["options"].addChild( Gaffer.NameValuePlug( "test", IECore.IntData( 10 ) ) )
		self.assertSceneHashesEqual( p["out"], options["out"], checks = self.allSceneChecks - { "globals" } )
		self.assertNotEqual( options["out"]["globals"].hash(), p["out"]["globals"].hash() )
		options["enabled"].setValue( False )
		self.assertSceneHashesEqual( p["out"], options["out"] )
		self.assertScenesEqual( p["out"], options["out"] )
	def testDirtyPropagation( self ) :
		"""Upstream edits dirty only the expected plugs (globals stay clean)."""
		p = GafferScene.Plane()
		o = GafferScene.CustomOptions()
		o["in"].setInput( p["out"] )
		cs = GafferTest.CapturingSlot( o.plugDirtiedSignal() )
		p["dimensions"]["x"].setValue( 100.1 )
		# Ignore private (double-underscore) plugs in the comparison.
		dirtiedPlugs = { x[0] for x in cs if not x[0].getName().startswith( "__" ) }
		self.assertEqual(
			dirtiedPlugs,
			{
				o["in"]["bound"],
				o["in"]["childBounds"],
				o["in"]["object"],
				o["in"],
				o["out"]["bound"],
				o["out"]["childBounds"],
				o["out"]["object"],
				o["out"],
			}
		)
	def testSubstitution( self ) :
		"""Context variable substitutions in option values affect hash and value."""
		o = GafferScene.CustomOptions()
		o["options"].addChild( Gaffer.NameValuePlug( "test", "${foo}" ) )
		# With no context variable set, the substitution yields "".
		self.assertEqual( o["out"]["globals"].getValue()["option:test"], IECore.StringData( "" ) )
		h = o["out"]["globals"].hash()
		c = Gaffer.Context()
		c["foo"] = "foo"
		with c :
			self.assertNotEqual( o["out"]["globals"].hash(), h )
			self.assertEqual( o["out"]["globals"].getValue()["option:test"], IECore.StringData( "foo" ) )
	def testDirtyPropagationOnMemberAdditionAndRemoval( self ) :
		"""Adding or removing an option plug dirties the output globals."""
		o = GafferScene.CustomOptions()
		cs = GafferTest.CapturingSlot( o.plugDirtiedSignal() )
		p = Gaffer.NameValuePlug( "test", IECore.IntData( 10 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		o["options"].addChild( p )
		self.assertTrue( o["out"]["globals"] in [ c[0] for c in cs ] )
		del cs[:]
		o["options"].removeChild( p )
		self.assertTrue( o["out"]["globals"] in [ c[0] for c in cs ] )
	def testSetsPassThrough( self ) :
		"""Set names and set contents are passed through by reference."""
		p = GafferScene.Plane()
		p["sets"].setValue( "a b" )
		o = GafferScene.CustomOptions()
		o["in"].setInput( p["out"] )
		self.assertEqual( p["out"]["setNames"].hash(), o["out"]["setNames"].hash() )
		self.assertTrue( p["out"]["setNames"].getValue( _copy = False ).isSame( o["out"]["setNames"].getValue( _copy = False ) ) )
		# NOTE(review): set "a" is compared against set "b" here -- presumably
		# deliberate since both pass through unchanged, but confirm.
		self.assertEqual( p["out"].setHash( "a" ), o["out"].setHash( "b" ) )
		self.assertTrue( p["out"].set( "a", _copy = False ).isSame( o["out"].set( "b", _copy = False ) ) )
	def testPrefix( self ) :
		"""The prefix plug is prepended to every option name in the globals."""
		options = GafferScene.CustomOptions()
		options["options"].addChild( Gaffer.NameValuePlug( "test", IECore.IntData( 10 ) ) )
		options["prefix"].setValue( "myCategory:" )
		g = options["out"]["globals"].getValue()
		self.assertEqual( g["option:myCategory:test"], IECore.IntData( 10 ) )
# Allow running this test file directly.
if __name__ == "__main__":
	unittest.main()
| {
"content_hash": "44abe6d4d3dad7e505c6e0b03074feac",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 163,
"avg_line_length": 34.604790419161674,
"alnum_prop": 0.6523620003460806,
"repo_name": "lucienfostier/gaffer",
"id": "85882f711c375483b40f96eabc752546e8a9178f",
"size": "7639",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/GafferSceneTest/CustomOptionsTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41979"
},
{
"name": "C++",
"bytes": "7610953"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7892655"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
import os
from tempfile import mkstemp
from numpy.testing import *
from numpy.distutils.npy_pkg_config import read_config, parse_flags
# Minimal package description in npy-pkg-config .ini format, together with
# the values we expect read_config() to extract from it.
simple = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[default]
cflags = -I/usr/include
libs = -L/usr/lib
"""
simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib',
        'version': '0.1', 'name': 'foo'}
# Same package, but using [variables] interpolation (${prefix} etc.) so the
# tests can exercise variable expansion and later override 'prefix'.
simple_variable = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[variables]
prefix = /foo/bar
libdir = ${prefix}/lib
includedir = ${prefix}/include
[default]
cflags = -I${includedir}
libs = -L${libdir}
"""
simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
        'version': '0.1', 'name': 'foo'}
class TestLibraryInfo(TestCase):
    """Tests for npy_pkg_config.read_config() using temporary .ini files.

    Fix: the deprecated ``failUnless`` alias (removed in Python 3.12) is
    replaced by ``assertEqual``, which also reports both values on failure.
    """
    def test_simple(self):
        """A minimal config file yields the expected flags and metadata."""
        fd, filename = mkstemp('foo.ini')
        try:
            # read_config() takes the package name, i.e. the path sans ".ini".
            pkg = os.path.splitext(filename)[0]
            try:
                os.write(fd, simple)
            finally:
                os.close(fd)
            out = read_config(pkg)
            self.assertEqual(out.cflags(), simple_d['cflags'])
            self.assertEqual(out.libs(), simple_d['libflags'])
            self.assertEqual(out.name, simple_d['name'])
            self.assertEqual(out.version, simple_d['version'])
        finally:
            os.remove(filename)
    def test_simple_variable(self):
        """${var} interpolation works, and overriding a variable re-expands."""
        fd, filename = mkstemp('foo.ini')
        try:
            pkg = os.path.splitext(filename)[0]
            try:
                os.write(fd, simple_variable)
            finally:
                os.close(fd)
            out = read_config(pkg)
            self.assertEqual(out.cflags(), simple_variable_d['cflags'])
            self.assertEqual(out.libs(), simple_variable_d['libflags'])
            self.assertEqual(out.name, simple_variable_d['name'])
            self.assertEqual(out.version, simple_variable_d['version'])
            # Overriding 'prefix' must be reflected in derived variables.
            out.vars['prefix'] = '/Users/david'
            self.assertEqual(out.cflags(), '-I/Users/david/include')
        finally:
            os.remove(filename)
class TestParseFlags(TestCase):
    """Tests for npy_pkg_config.parse_flags() flag-string parsing.

    Fix: the deprecated ``failUnless`` alias (removed in Python 3.12) is
    replaced by ``assertEqual``, which also reports both values on failure.
    """
    def test_simple_cflags(self):
        """-I and -D flags are split into include_dirs and macros."""
        d = parse_flags("-I/usr/include")
        self.assertEqual(d['include_dirs'], ['/usr/include'])
        d = parse_flags("-I/usr/include -DFOO")
        self.assertEqual(d['include_dirs'], ['/usr/include'])
        self.assertEqual(d['macros'], ['FOO'])
        # A space between -I and the path must also be accepted.
        d = parse_flags("-I /usr/include -DFOO")
        self.assertEqual(d['include_dirs'], ['/usr/include'])
        self.assertEqual(d['macros'], ['FOO'])
    def test_simple_lflags(self):
        """-L and -l flags are split into library_dirs and libraries."""
        d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
        self.assertEqual(d['library_dirs'], ['/usr/lib', '/usr/lib'])
        self.assertEqual(d['libraries'], ['foo', 'bar'])
        # A space between -L and the path must also be accepted.
        d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
        self.assertEqual(d['library_dirs'], ['/usr/lib', '/usr/lib'])
        self.assertEqual(d['libraries'], ['foo', 'bar'])
| {
"content_hash": "6b07714b5757db49fd9546ada5a3560d",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 82,
"avg_line_length": 30.947916666666668,
"alnum_prop": 0.5624368899360485,
"repo_name": "plaes/numpy",
"id": "5553aa8786485adb6b8d158040a416cd109d6b51",
"size": "2971",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "numpy/distutils/tests/test_npy_pkg_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Do a minimal test of all the modules that aren't otherwise tested."""
import importlib
import sys
from test import support
import unittest
class TestUntestedModules(unittest.TestCase):
    """Smoke test for stdlib modules that have no dedicated test file."""
    def test_untested_modules_can_be_imported(self):
        # Each name here is expected to have a skipped/absent test module;
        # if importing test.test_<name> succeeds instead of raising
        # SkipTest, the listing below is stale and the test fails.
        untested = ('encodings', 'formatter', 'tabnanny')
        with support.check_warnings(quiet=True):
            for name in untested:
                try:
                    support.import_module('test.test_{}'.format(name))
                except unittest.SkipTest:
                    # No real test exists: just prove the module imports.
                    importlib.import_module(name)
                else:
                    self.fail('{} has tests even though test_sundry claims '
                              'otherwise'.format(name))
# The remaining modules are exercised simply by importing them at module
# load time; any import failure fails the whole test file.
import distutils.bcppcompiler
import distutils.ccompiler
import distutils.cygwinccompiler
import distutils.filelist
import distutils.text_file
import distutils.unixccompiler
import distutils.command.bdist_dumb
# bdist_msi is only available on Windows.
if sys.platform.startswith('win'):
    import distutils.command.bdist_msi
import distutils.command.bdist
import distutils.command.bdist_rpm
import distutils.command.bdist_wininst
import distutils.command.build_clib
import distutils.command.build_ext
import distutils.command.build
import distutils.command.clean
import distutils.command.config
import distutils.command.install_data
import distutils.command.install_egg_info
import distutils.command.install_headers
import distutils.command.install_lib
import distutils.command.register
import distutils.command.sdist
import distutils.command.upload
import html.entities
try:
    import tty  # Not available on Windows
except ImportError:
    if support.verbose:
        print("skipping tty")
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "219c88378da385a0b9c03ac67c68c7d0",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 76,
"avg_line_length": 37.267857142857146,
"alnum_prop": 0.5975083852419741,
"repo_name": "FFMG/myoddweb.piger",
"id": "4025c2354a6cdbe3f6db7fd74f6e1e6594165103",
"size": "2087",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "monitor/api/python/Python-3.7.2/Lib/test/test_sundry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "399228"
},
{
"name": "Batchfile",
"bytes": "93889"
},
{
"name": "C",
"bytes": "32256857"
},
{
"name": "C#",
"bytes": "197461"
},
{
"name": "C++",
"bytes": "200544641"
},
{
"name": "CMake",
"bytes": "192771"
},
{
"name": "CSS",
"bytes": "441704"
},
{
"name": "CWeb",
"bytes": "174166"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "Cuda",
"bytes": "52444"
},
{
"name": "DIGITAL Command Language",
"bytes": "33549"
},
{
"name": "DTrace",
"bytes": "2157"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "HTML",
"bytes": "181677643"
},
{
"name": "IDL",
"bytes": "14"
},
{
"name": "Inno Setup",
"bytes": "9647"
},
{
"name": "JavaScript",
"bytes": "705756"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "Lua",
"bytes": "3332"
},
{
"name": "M4",
"bytes": "259214"
},
{
"name": "Makefile",
"bytes": "1262318"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "Objective-C",
"bytes": "2167778"
},
{
"name": "Objective-C++",
"bytes": "630"
},
{
"name": "PHP",
"bytes": "59030"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Pascal",
"bytes": "75208"
},
{
"name": "Perl",
"bytes": "42080"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "11781"
},
{
"name": "Python",
"bytes": "30377308"
},
{
"name": "QML",
"bytes": "593"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Rich Text Format",
"bytes": "6743"
},
{
"name": "Roff",
"bytes": "55661"
},
{
"name": "Ruby",
"bytes": "5532"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "783974"
},
{
"name": "TSQL",
"bytes": "1201"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "32117"
},
{
"name": "Visual Basic",
"bytes": "70"
},
{
"name": "XSLT",
"bytes": "552736"
},
{
"name": "Yacc",
"bytes": "19623"
}
],
"symlink_target": ""
} |
from pycoin.blockchain.BlockChain import BlockChain
class FakeBlock(object):
    """Minimal block-header stand-in whose hash() is simply its index ``n``.

    By default block ``n`` claims ``n - 1`` as its parent, forming a linear
    chain; pass ``previous_block_hash`` explicitly to create forks.
    """
    def __init__(self, n, previous_block_hash=None):
        self.n = n
        self.previous_block_hash = (n - 1) if previous_block_hash is None else previous_block_hash
        self.difficulty = 1
    def hash(self):
        return self.n
def longest_block_chain(self):
    """Return the hashes of the whole longest chain, in index order.

    (Module-level helper; the parameter is a BlockChain despite its name.)
    """
    return [self.hash_for_index(idx) for idx in range(self.length())]
def longest_locked_block_chain(self):
    """Return the longest chain's hashes above the locked prefix.

    (Module-level helper; the parameter is a BlockChain despite its name.)
    """
    return [self.hash_for_index(idx)
            for idx in range(self.locked_length(), self.length())]
parent_for_0 = "motherless"  # sentinel "parent hash" used as the genesis parent in these tests
def test_basic():
    """Feed 100 linear headers in in-order, gapped and gap-filling batches,
    checking chain contents, returned ops and lookups after every step.

    NOTE(review): ``assert v[1] == parent_for_0 if i == 0 else i`` parses as
    ``(v[1] == parent_for_0) if i == 0 else i`` -- for i > 0 it only asserts
    that ``i`` is truthy, so v[1] is never actually checked there.  Left
    as-is because the corrected expectation cannot be verified from here.
    """
    BC = BlockChain(parent_for_0)
    ITEMS = [FakeBlock(i) for i in range(100)]
    ITEMS[0] = FakeBlock(0, parent_for_0)
    # Freshly created chain is empty.
    assert longest_block_chain(BC) == []
    assert BC.length() == 0
    assert BC.locked_length() == 0
    assert set(BC.chain_finder.missing_parents()) == set()
    assert BC.parent_hash == parent_for_0
    assert BC.index_for_hash(0) is None
    assert BC.index_for_hash(-1) is None
    # First five headers extend the chain directly.
    ops = BC.add_headers(ITEMS[:5])
    assert ops == [("add", ITEMS[i], i) for i in range(5)]
    assert BC.parent_hash == parent_for_0
    assert longest_block_chain(BC) == list(range(5))
    assert BC.length() == 5
    assert BC.locked_length() == 0
    assert set(BC.chain_finder.missing_parents()) == {parent_for_0}
    for i in range(5):
        v = BC.tuple_for_index(i)
        assert v[0] == i
        assert v[1] == parent_for_0 if i == 0 else i
    assert BC.index_for_hash(-1) is None
    # Re-sending an overlapping batch only reports the new headers.
    ops = BC.add_headers(ITEMS[:7])
    assert ops == [("add", ITEMS[i], i) for i in range(5, 7)]
    assert BC.parent_hash == parent_for_0
    assert longest_block_chain(BC) == list(range(7))
    assert BC.length() == 7
    assert BC.locked_length() == 0
    assert set(BC.chain_finder.missing_parents()) == {parent_for_0}
    for i in range(7):
        v = BC.tuple_for_index(i)
        assert v[0] == i
        assert v[1] == parent_for_0 if i == 0 else i
    assert BC.index_for_hash(-1) is None
    # A disconnected batch (missing parent 9) changes nothing yet.
    ops = BC.add_headers(ITEMS[10:14])
    assert ops == []
    assert BC.parent_hash == parent_for_0
    assert longest_block_chain(BC) == [0, 1, 2, 3, 4, 5, 6]
    # NOTE(review): duplicated assertion, probably a copy/paste slip.
    assert BC.locked_length() == 0
    assert BC.locked_length() == 0
    assert BC.length() == 7
    assert set(BC.chain_finder.missing_parents()) == {parent_for_0, 9}
    for i in range(7):
        v = BC.tuple_for_index(i)
        assert v[0] == i
        assert v[1] == parent_for_0 if i == 0 else i
    assert BC.index_for_hash(-1) is None
    # Filling the 7..9 gap connects the orphans: seven headers land at once.
    ops = BC.add_headers(ITEMS[7:10])
    assert ops == [("add", ITEMS[i], i) for i in range(7, 14)]
    assert longest_block_chain(BC) == list(range(14))
    assert set(BC.chain_finder.missing_parents()) == {parent_for_0}
    assert BC.parent_hash == parent_for_0
    assert BC.locked_length() == 0
    assert BC.length() == 14
    for i in range(14):
        v = BC.tuple_for_index(i)
        assert v[0] == i
        assert v[1] == parent_for_0 if i == 0 else i
    assert BC.index_for_hash(-1) is None
    # Another disconnected tail (missing parent 89) is held back.
    ops = BC.add_headers(ITEMS[90:])
    assert ops == []
    assert longest_block_chain(BC) == list(range(14))
    assert set(BC.chain_finder.missing_parents()) == {parent_for_0, 89}
    assert BC.parent_hash == parent_for_0
    assert BC.locked_length() == 0
    assert BC.length() == 14
    for i in range(14):
        v = BC.tuple_for_index(i)
        assert v[0] == i
        assert v[1] == parent_for_0 if i == 0 else i
    assert BC.index_for_hash(-1) is None
    # Filling the final gap attaches everything through index 99.
    ops = BC.add_headers(ITEMS[14:90])
    assert ops == [("add", ITEMS[i], i) for i in range(14, 100)]
    assert longest_block_chain(BC) == list(range(100))
    assert set(BC.chain_finder.missing_parents()) == {parent_for_0}
    assert BC.parent_hash == parent_for_0
    assert BC.locked_length() == 0
    assert BC.length() == 100
    for i in range(100):
        v = BC.tuple_for_index(i)
        assert v[0] == i
        assert v[1] == parent_for_0 if i == 0 else i
    assert BC.index_for_hash(-1) is None
def test_fork():
    """A longer fork off block 3 reorganises the chain once its gap closes.

    NOTE(review): the local ``parent_for_0`` (32 zero bytes) shadows the
    module-level string of the same name.
    """
    parent_for_0 = b'\0' * 32
    # 0 <= 1 <= ... <= 5 <= 6
    # 3 <= 301 <= 302 <= 303 <= 304 <= 305
    # parent_for_0 = "motherless"
    BC = BlockChain(parent_for_0)
    ITEMS = dict((i, FakeBlock(i)) for i in range(7))
    ITEMS[0] = FakeBlock(0, parent_for_0)
    ITEMS.update(dict((i, FakeBlock(i)) for i in range(301, 306)))
    # Fork point: 301's parent is block 3.
    ITEMS[301] = FakeBlock(301, 3)
    assert longest_block_chain(BC) == []
    assert BC.locked_length() == 0
    assert BC.length() == 0
    assert set(BC.chain_finder.missing_parents()) == set()
    # send them all except 302
    ops = BC.add_headers((ITEMS[i] for i in ITEMS.keys() if i != 302))
    # Only the 0..6 chain connects; the fork is still orphaned at 302.
    assert ops == [("add", ITEMS[i], i) for i in range(7)]
    assert set(BC.chain_finder.missing_parents()) == set([parent_for_0, 302])
    # now send 302
    ops = BC.add_headers([ITEMS[302]])
    # we should see a change
    expected = [("remove", ITEMS[i], i) for i in range(6, 3, -1)]
    expected += [("add", ITEMS[i], i+4-301) for i in range(301, 306)]
    assert ops == expected
    assert set(BC.chain_finder.missing_parents()) == set([parent_for_0])
def test_callback():
    """The registered change callback receives the same ops as test_fork."""
    R = []
    def the_callback(blockchain, ops):
        R.extend(ops)
    parent_for_0 = b'\0' * 32
    # same as test_fork, above
    BC = BlockChain(parent_for_0)
    BC.add_change_callback(the_callback)
    ITEMS = dict((i, FakeBlock(i)) for i in range(7))
    ITEMS[0] = FakeBlock(0, parent_for_0)
    ITEMS.update(dict((i, FakeBlock(i)) for i in range(301, 306)))
    ITEMS[301] = FakeBlock(301, 3)
    # send them all except 302
    BC.add_headers((ITEMS[i] for i in ITEMS.keys() if i != 302))
    # now send 302
    BC.add_headers([ITEMS[302]])
    # Callback must have observed: initial adds, then the reorg.
    expected = [("add", ITEMS[i], i) for i in range(7)]
    expected += [("remove", ITEMS[i], i) for i in range(6, 3, -1)]
    expected += [("add", ITEMS[i], i+4-301) for i in range(301, 306)]
    assert R == expected
def test_large():
    """A single 3000-header batch attaches in one pass (stress check)."""
    SIZE = 3000
    ITEMS = [FakeBlock(i) for i in range(SIZE)]
    ITEMS[0] = FakeBlock(0, parent_for_0)
    BC = BlockChain(parent_for_0)
    assert longest_block_chain(BC) == []
    assert BC.locked_length() == 0
    assert BC.length() == 0
    assert set(BC.chain_finder.missing_parents()) == set()
    ops = BC.add_headers(ITEMS)
    assert ops == [("add", ITEMS[i], i) for i in range(SIZE)]
    assert longest_block_chain(BC) == list(range(SIZE))
    assert set(BC.chain_finder.missing_parents()) == {parent_for_0}
    assert BC.parent_hash == parent_for_0
    assert BC.locked_length() == 0
    assert BC.length() == SIZE
    for i in range(SIZE):
        v = BC.tuple_for_index(i)
        assert v[0] == i
        # NOTE(review): same ternary-precedence smell as in test_basic.
        assert v[1] == parent_for_0 if i == 0 else i
    assert BC.index_for_hash(-1) is None
def test_chain_locking():
    """Add COUNT batches of SIZE headers, locking the chain 10 below the tip
    after each batch, and verify the locked prefix tracks lock_to_index().

    NOTE(review): the inner ``for i in range(start, end)`` shadows the outer
    loop variable ``i``; harmless in Python's for semantics but fragile.
    """
    SIZE = 2000
    COUNT = 200
    ITEMS = [FakeBlock(i, i-1) for i in range(SIZE*COUNT)]
    ITEMS[0] = FakeBlock(0, parent_for_0)
    BC = BlockChain(parent_for_0)
    assert longest_block_chain(BC) == []
    assert BC.locked_length() == 0
    assert BC.length() == 0
    assert set(BC.chain_finder.missing_parents()) == set()
    for i in range(COUNT):
        start, end = i*SIZE, (i+1)*SIZE
        # The previous iteration locked everything up to 10 below its tip.
        lock_start = max(0, start-10)
        expected_parent = lock_start-1 if lock_start else parent_for_0
        assert BC.length() == start
        assert BC.locked_length() == lock_start
        ops = BC.add_headers(ITEMS[start:end])
        assert ops == [("add", ITEMS[i], i) for i in range(start, end)]
        assert longest_locked_block_chain(BC) == list(range(lock_start, end))
        assert set(BC.chain_finder.missing_parents()) == {expected_parent}
        assert BC.parent_hash == expected_parent
        assert BC.locked_length() == lock_start
        assert BC.length() == end
        for i in range(start, end):
            v = BC.tuple_for_index(i)
            assert v[0] == i
            assert v[1] == parent_for_0 if i == 0 else i
        assert BC.index_for_hash(-1) is None
        assert BC.locked_length() == max(0, lock_start)
        # Lock everything except the last ten headers of this batch.
        BC.lock_to_index(end-10)
        assert BC.locked_length() == end-10
| {
"content_hash": "87f465e98845c79abe73474c38d31f7e",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 77,
"avg_line_length": 33.57959183673469,
"alnum_prop": 0.5885498966816579,
"repo_name": "shivaenigma/pycoin",
"id": "f1f23b47ae805953fa5b02cd5e4a87d941469e3c",
"size": "8228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/blockchain_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "115"
},
{
"name": "Python",
"bytes": "612097"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, get_object_or_404
from blog.models import Post
def index(request):
    """Render the blog index listing every published post."""
    published_posts = Post.objects.filter(published=True)
    return render(request, 'blog/index.html', {'posts': published_posts})
def post(request, slug):
    """Render a single blog post looked up by its slug (404 when absent)."""
    entry = get_object_or_404(Post, slug=slug)
    return render(request, 'blog/post.html', {'post': entry})
"content_hash": "ddaa1dc5571472e9c2d614275c6548dd",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 57,
"avg_line_length": 33,
"alnum_prop": 0.7597402597402597,
"repo_name": "intuinno/vistalk",
"id": "2dd5533d54d5fad205e490161d57ec510bd845b9",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "117204"
},
{
"name": "JavaScript",
"bytes": "5728465"
},
{
"name": "Python",
"bytes": "395122"
}
],
"symlink_target": ""
} |
"""Mass Upload Libray - processing metadata values."""
__authors__ = 'User:Jean-Frédéric'
import re
import os
import codecs
from collections import defaultdict
import pywikibot.textlib as textlib
def remove_linebreaks(field, old_field_value):
    """Collapse linebreaks in the value and map the field to the cleaned text.

    Newlines become spaces, carriage returns are dropped, and one pass of
    double-space collapsing is applied.
    """
    cleaned = old_field_value.replace('\n', ' ')
    cleaned = cleaned.replace('\r', '')
    cleaned = cleaned.replace('  ', ' ')
    return {field: cleaned}
def wrap_with_template(template='fr'):
    """Return a (callable, kwargs) pair wrapping values as {{template|value}}."""
    return wrap_within_pattern_i, {'pattern': '{{%s|%s}}' % (template, '%s')}
def wrap_within_pattern(pattern='%s'):
    """Return a (callable, kwargs) pair interpolating values into *pattern*."""
    return wrap_within_pattern_i, {'pattern': pattern}
def wrap_within_pattern_i(field, old_field_value, pattern='%s'):
    """Interpolate the value (slashes turned into dashes) into *pattern*.

    Returns a dict with the wrapped value under '<field>_wrapped' and the
    untouched original value under *field*.
    """
    sanitised = old_field_value.replace('/', '-')
    return {field + '_wrapped': pattern % sanitised,
            field: old_field_value}
def split_and_keep_as_list(separator=" ; "):
    """Split and make a list.

    Returns a (callable, kwargs) pair splitting field values on *separator*.
    """
    return split_and_keep_as_list_i, {'separator': separator}
def split_and_keep_as_list_i(field, old_field_value, separator=" ; "):
    """Split the value on *separator*; map the field to the stripped parts."""
    parts = old_field_value.split(separator)
    return {field: [part.strip() for part in parts]}
def join_all(field, old_field_value, separator=" ; "):
    """Join the values together.

    Maps *field* to the elements of *old_field_value* joined by *separator*.
    """
    joined = separator.join(old_field_value)
    return {field: joined}
def map_and_apply_technique(separator=","):
    """Return a (callable, kwargs) pair converting techniques to {{Technique}}.

    The mapper translates French support/technique names to the English
    values expected by the Commons {{Technique}} template.
    """
    support_mapper = {
        'papier': 'paper',
        'parchemin': 'parchment',
        'registre de Parchemin': 'parchment',
        'cire': 'wax',
        'cuir': 'leather',
        'plâtre': 'plaster',
        'bois': 'wood',
        'érable': 'maple',
        'velin': 'Vellum',
        'tissu': 'fabric',
        'plomb': 'lead',
        'gravure': 'engraving'
    }
    kwargs = {'template': 'Technique',
              'mapper': support_mapper,
              'separator': separator}
    return (split_and_apply_template_on_each, kwargs)
def split_and_apply_template_on_each(field, old_field_value, template, mapper, separator=","):
    """Split the old value on *separator* and wrap each part in {{template|...}}.

    Each part is stripped, lower-cased and translated through *mapper*
    (falling back to the part itself) before being wrapped.
    """
    wrapped = []
    for raw_bit in old_field_value.split(separator):
        bit = raw_bit.strip().lower()
        wrapped.append("{{%s|%s}}" % (template, mapper.get(bit, bit)))
    return {field: ', '.join(wrapped)}
def process_DIMS(field, old_field_value):
    """Process the Joconde DIMS field.

    Split the field into per-dimension (letter, value, unit) triples and
    build a dictionary from them.  When a unit is found, return the value
    rendered through make_size_template under '<field>_commons'; otherwise
    return the raw dimension dict.  On unparseable floats, fall back to the
    original value under *field*.
    """
    DIMS = old_field_value
    # e.g. "H. 71 ; L. 191 ; l. 77 cm" -> letter / number / optional unit triples
    pattern = r'(\w+?)[\.:]\s?([\d,]*)\s?(\w*)\s?;?\s?'
    # BUG FIX: use a list comprehension instead of filter() so the step
    # slicing below also works on Python 3, where filter() is a lazy
    # iterator that does not support slicing.
    splitted = [part for part in re.split(pattern, DIMS) if part != u'']
    try:
        DIMS_BIS = dict(zip([str(x) for x in splitted[0::3]],
                            [float(x.replace(',', '.')) for x in splitted[1::3]]))
        if len(splitted[2::3]) > 0:
            DIMS_BIS['unit'] = splitted[2::3][0]
            return {'%s_commons' % field: make_size_template(DIMS_BIS)}
        return DIMS_BIS
    except ValueError:
        # a value such as "12,,3" cannot be converted - keep the raw field
        return {field: old_field_value}
def make_size_template(dictionary):
    """Render a dimensions dict as a Commons {{Size}} template.

    Missing keys render as empty strings; the 'P' value feeds both the
    depth and the thickness parameters.
    """
    values = defaultdict(str, dictionary)
    body = ("Size|unit={0[unit]}|length={0[L]}|height={0[H]}|width={0[l]}"
            "|depth={0[P]}|diameter={0[D]}|thickness={0[P]}").format(values)
    return "{{%s}}" % body
def make_categories(categories):
    """Build the wikitext for a given list of category names."""
    lines = ["[[Category:%s]]" % name for name in categories]
    return "\n".join(lines)
def process_with_alignment(field, old_field_value, mapper=None):
    """Retrieve the alignment for a given record contents.

    Maps *field* to the aligned value (or the original when no alignment
    matched) and adds a 'categories' entry when categories were aligned.
    """
    value, categories = mapper[field].get(old_field_value, ("", []))
    result = {field: value if value else old_field_value}
    if categories:
        result['categories'] = categories
    return result
def process_with_alignment_on_list(field, old_field_value, mapper=None):
    """Align each element of a list-valued field.

    Maps *field* to the set of aligned values (or the original list when
    nothing matched) and 'categories' to the set of all aligned categories.
    """
    new_value = dict()
    all_value = set()
    all_categories = set()
    for content in old_field_value:
        content = content.strip()
        (value, categories) = mapper[field].get(content, ("", []))
        if value:
            all_value.add(value)
        if categories:
            # BUG FIX: `categories` is a list; set.add() on a list raised
            # TypeError (unhashable type) - merge its elements instead.
            all_categories.update(categories)
    if all_value:
        new_value[field] = all_value
    else:
        new_value[field] = old_field_value
    new_value['categories'] = all_categories
    return new_value
def parse_categories(some_string):
    """Return the category names contained in a string, as a list."""
    pattern = r'\[\[:?Category:(.+?)\]]'
    black_list = [u'', u'\n']
    # FIX: list comprehension instead of filter() so the result is a real
    # list on Python 3 as well (filter() there returns a lazy iterator).
    splitted = [part for part in re.split(pattern, some_string)
                if part not in black_list]
    return splitted
def _retrieve_from_wiki(filename, alignment_template):
    """Retrieve a metadata mapping from a given wikipage on disk.

    Iterate over the given alignment template occurrences, retrieve and
    return the mapping values.  Returns None (after printing the error)
    when the page cannot be read or parsed - callers treat that as
    "no alignment available".
    """
    wiki_file = os.path.join('wiki', filename.replace("/", ""))
    try:
        with codecs.open(wiki_file, mode='r', encoding='utf-8') as f:
            all_templates = textlib.extract_templates_and_params(f.read())
            field_mapper = dict()
            for x in all_templates:
                if x[0] == alignment_template:
                    field = x[1]['item'].strip()
                    raw_categories = x[1]['categories']
                    categories = parse_categories(raw_categories)
                    raw_value = x[1]['value'].strip()
                    field_mapper[field] = (raw_value, categories)
            return field_mapper
    # FIX: `except Exception, e` / `print e` were Python-2-only syntax;
    # these forms work on Python 2.6+ and Python 3 alike.
    except Exception as e:
        print(e)
def retrieve_metadata_alignments(fields, alignment_template):
    """Retrieve metadata alignments from disk for all given fields.

    Each field name doubles as the wikipage name passed to
    _retrieve_from_wiki; the result maps field -> alignment mapping.
    """
    return dict((field, _retrieve_from_wiki(field, alignment_template))
                for field in fields)
def parse_format(field, old_field_value):
    """Parse a format string and map the field to the parsed result."""
    return {field: parse_format_unwrapped(old_field_value)}
def _clean_dim(dim):
"""Clean a dimension-like string"""
return re.sub(r"\s?,\s?", '.', dim).strip()
def _pattern_to_size(m):
    """Convert a matched dimension pattern into a {{Size}} template (cm assumed)."""
    unit = 'cm'
    groups = m.groupdict()
    # keep group values in alphabetical group-name order, dropping empties
    ordered = [groups[key] for key in sorted(groups.keys())]
    present = [dim for dim in ordered if dim]
    joined = '|'.join([_clean_dim(dim) for dim in present])
    return " {{Size|%s|%s}}" % (unit, joined)
def parse_format_unwrapped(text):
    """Replace '<a> x <b> cm'-style measurements in *text* by {{Size}} templates."""
    format_pattern = re.compile(r"""
        (?P<a>[\d,\.]+?)      # Digits, comma or dot, captured as group
        \s*[Xx×]\s*           # Whitespace, x, whitespace
        (?P<b>[\d,\.]+?)      # Same
        \s*cm?                # Whitespace until the end
        """, re.X)
    return re.sub(format_pattern, _pattern_to_size, text)
def look_for_date(field, old_field_value):
    """Wrapper around look_for_date_unwrapped.

    Keeps the original value under *field* and adds 'date' and 'year'
    entries when they could be extracted.
    """
    date, year = look_for_date_unwrapped(old_field_value)
    result = {field: old_field_value}
    if date:
        result['date'] = date
    if year:
        result['year'] = year
    return result
def look_for_date_unwrapped(text):
    """Look for a date in the given text.

    Search a given string for a date pattern, using regular expressions.
    Return a (date, year) tuple - the date either in ISO YYYY-MM[-DD]
    format or as an {{Other date}} template - or (None, None) when no
    pattern was recognised.
    """
    monthList = {
        u'janvier': 1, u'février': 2, u'mars': 3, u'avril': 4,
        u'mai': 5, u'juin': 6, 'juillet': 7, u'août': 8,
        u'septembre': 9, u'octobre': 10, u'novembre': 11, u'décembre': 12
    }
    fullDatePattern = re.compile(r"""
        (?P<day>\d+?)      # Some digits
        \s?                # Whitespace
        (?P<month>[\w]+?)  # Some letters, captured as 'month'
        \s                 # Whitespace
        (?P<year>\d{3,4})  # Three or four digits, captured
        """, re.UNICODE + re.X)
    monthDatePattern = re.compile(r"""
        (?P<month>\w\w\w[\w]+?)  # Some letters, captured as 'month'
        \s                       # Whitespace
        (?P<year>\d\d\d\d)       # Four digits, captured as 'year'
        """, re.UNICODE + re.X)
    circaYearPattern = re.compile(r"""
        Vers                # The 'Vers' word
        \s*?                # Maybe some whitespace
        (?P<year>\d\d\d\d)  # Four digits, captured as 'year'
        """, re.UNICODE + re.X)
    yearPattern = re.compile(r"""
        (?P<year>\d\d\d\d)  # Four digits, captured as 'year'
        """, re.UNICODE + re.X)
    circaDatePattern = re.compile(r"""
        Vers                # The 'Vers' word
        \s                  # Whitespace
        (?P<month>\w*?)     # Some letters, captured as 'month'
        \s                  # Whitespace
        (?P<year>\d\d\d\d)  # Four digits, captured as 'year'
        """, re.UNICODE + re.X)
    betweenDatePattern = re.compile(r"""
        Entre                # The 'Entre' word
        [\s\w]*?             # Whatever words and whitespace
        \s                   # Whitespace
        (?P<year1>\d\d\d\d)  # Four digits
        \s                   # Whitespace
        et                   # The 'et' word
        \s                   # Whitespace
        (?P<year2>\d\d\d\d)  # Four digits
        """, re.UNICODE + re.X)
    orDatePattern = re.compile(r"""
        (?P<year1>\d\d\d\d)
        \sou\s
        (?P<year2>\d\d\d\d)
        """, re.UNICODE + re.X)
    decadeDatePattern = re.compile(r"""
        Ann\wes             # The 'Années' word
        \s                  # Whitespace
        (?P<year>\d\d\d\d)  # Four digits
        """, re.UNICODE + re.X)
    centuryPattern = re.compile(r"""
        (?P<qualifier>Fin)?\s?  # The 'Fin' word, possibly
        (?P<century>\d\d)       # Two digits
        e                       # The 'e' letter
        \s                      # Whitespace
        si\wcle                 # The 'siècle' word
        """, re.UNICODE + re.X)
    fullDateR = re.search(fullDatePattern, text)
    monthDateR = re.search(monthDatePattern, text)
    circaYearR = re.search(circaYearPattern, text)
    circaDateR = re.search(circaDatePattern, text)
    betweenDateR = re.search(betweenDatePattern, text)
    orDateR = re.search(orDatePattern, text)
    decadeDateR = re.search(decadeDatePattern, text)
    centuryR = re.search(centuryPattern, text)
    if betweenDateR:
        date = u'{{Other date|between|%s|%s}}' % (betweenDateR.group('year1'),
                                                  betweenDateR.group('year2'))
        return (date, None)
    elif orDateR:
        date = u'{{Other date|or|%s|%s}}' % (orDateR.group('year1'),
                                             orDateR.group('year2'))
        return (date, None)
    elif decadeDateR:
        date = u'{{Other date|decade|%s}}' % (decadeDateR.group('year'))
        return (date, None)
    elif fullDateR:
        month = fullDateR.group('month').lower()
        if month in monthList.keys():
            year = fullDateR.group('year')
            date = u'%s-%s-%s' % (year,
                                  '%02d' % monthList[month],
                                  '%02d' % int(fullDateR.group('day')))
            return (date, year)
        else:
            return (None, None)
    elif circaDateR:
        month = circaDateR.group('month').lower()
        if month in monthList.keys():
            year = circaDateR.group('year')
            date = u'{{Other date|circa|%s-%s}}' % (year,
                                                    '%02d' % monthList[month])
            return (date, year)
        # BUG FIX: this branch previously fell through without a return,
        # yielding a bare None that crashed callers unpacking the result
        # into (date, year).
        return (None, None)
    elif circaYearR:
        circaYear = circaYearR.group('year')
        date = u'{{Other date|circa|%s}}' % (circaYear)
        return (date, circaYear)
    elif monthDateR:
        month = monthDateR.group('month').lower()
        if month in monthList.keys():
            year = monthDateR.group('year')
            date = u'%s-%s' % (year, '%02d' % monthList[month])
            return (date, year)
        else:
            return (None, None)
    elif centuryR:
        century = centuryR.group('century')
        date = '{{Other date|century|%s}}' % (century)
        if centuryR.groupdict()['qualifier']:
            qualifier = centuryR.group('qualifier').lower()
            table = {'fin': 'end'}
            date = u'{{Other date|%s|%s}}' % (table[qualifier], date)
        return (date, None)
    else:
        return (None, None)
| {
"content_hash": "c72c7e1d5a5e7293a0fdcc3c58e6fb8f",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 123,
"avg_line_length": 35.381443298969074,
"alnum_prop": 0.5504807692307693,
"repo_name": "Commonists/MassUploadLibrary",
"id": "05845884844c80a005f83af159afbf209c6a6305",
"size": "13755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uploadlibrary/PostProcessing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42317"
}
],
"symlink_target": ""
} |
"""
RPM Package builder system
.. versionadded:: 2015.8.0
This system allows for all of the components to build rpms safely in chrooted
environments. This also provides a function to generate yum repositories
This module implements the pkgbuild interface
"""
import errno
import functools
import logging
import os
import re
import shutil
import tempfile
import time
import traceback
import urllib.parse
import salt.utils.files
import salt.utils.path
import salt.utils.user
import salt.utils.vt
from salt.exceptions import CommandExecutionError, SaltInvocationError
HAS_LIBS = False
try:
import gnupg # pylint: disable=unused-import
import salt.modules.gpg
HAS_LIBS = True
except ImportError:
pass
log = logging.getLogger(__name__)
__virtualname__ = "pkgbuild"
def __virtual__():
    """
    Confirm this module is on a RPM based system, and has required utilities
    """
    utils_reqd = ["gpg", "rpm", "rpmbuild", "mock", "createrepo"]
    have_all_utils = all(salt.utils.path.which(util) for util in utils_reqd)
    if HAS_LIBS and have_all_utils:
        if __grains__.get("os_family", False) in ("RedHat", "Suse"):
            return __virtualname__
        # The module will be exposed as `rpmbuild` on non-RPM based systems
        return "rpmbuild"
    return (
        False,
        "The rpmbuild module could not be loaded: requires python-gnupg, "
        "gpg, rpm, rpmbuild, mock and createrepo utilities to be installed",
    )
def _create_rpmmacros(runas="root"):
    """
    Create the .rpmmacros file in user's home directory
    """
    home = os.path.expanduser("~" + runas)

    # make sure both the rpmbuild tree root and the mock dir exist
    for dirname in ("rpmbuild", "mock"):
        target = os.path.join(home, dirname)
        if not os.path.isdir(target):
            __salt__["file.makedirs_perms"](name=target, user=runas, group="mock")

    rpmbuilddir = os.path.join(home, "rpmbuild")
    rpmmacros = os.path.join(home, ".rpmmacros")
    with salt.utils.files.fopen(rpmmacros, "w") as macro_file:
        macro_file.write(
            salt.utils.stringutils.to_str("%_topdir {}\n".format(rpmbuilddir))
        )
        for line in (
            "%signature gpg\n",
            "%_source_filedigest_algorithm 8\n",
            "%_binary_filedigest_algorithm 8\n",
            "%_gpg_name packaging@saltstack.com\n",
        ):
            macro_file.write(line)
def _mk_tree(runas="root"):
    """
    Create the rpm build tree
    """
    basedir = tempfile.mkdtemp()
    for subdir in ("BUILD", "RPMS", "SOURCES", "SPECS", "SRPMS"):
        __salt__["file.makedirs_perms"](
            name=os.path.join(basedir, subdir), user=runas, group="mock"
        )
    return basedir
def _get_spec(tree_base, spec, template, saltenv="base"):
    """
    Get the spec file and place it in the SPECS dir

    NOTE: *template* is accepted for interface compatibility but is not
    used by this implementation.
    """
    destination = os.path.join(tree_base, "SPECS", os.path.basename(spec))
    return __salt__["cp.get_url"](spec, destination, saltenv=saltenv)
def _get_src(tree_base, source, saltenv="base", runas="root"):
    """
    Get the named sources and place them into the tree_base
    """
    dest = os.path.join(tree_base, "SOURCES", os.path.basename(source))
    if urllib.parse.urlparse(source).scheme:
        # remote source: fetch through the salt URL helper
        __salt__["cp.get_url"](source, dest, saltenv=saltenv)
    else:
        # plain path on the minion: copy it in directly
        shutil.copy(source, dest)
    __salt__["file.chown"](path=dest, user=runas, group="mock")
def _get_distset(tgt):
"""
Get the distribution string for use with rpmbuild and mock
"""
# Centos adds 'centos' string to rpm names, removing that to have
# consistent naming on Centos and Redhat, and allow for Amazon naming
tgtattrs = tgt.split("-")
if tgtattrs[0] == "amzn2":
distset = '--define "dist .{}"'.format(tgtattrs[0])
elif tgtattrs[1] in ["6", "7", "8"]:
distset = '--define "dist .el{}"'.format(tgtattrs[1])
else:
distset = ""
return distset
def _get_deps(deps, tree_base, saltenv="base"):
    """
    Get include string for list of dependent rpms to build package

    *deps* may be None, a list, or a comma-separated string of package
    URLs/paths; each one is copied (or fetched) into *tree_base* and the
    resulting local paths are returned as one space-prefixed string.
    """
    deps_list = ""
    if deps is None:
        return deps_list
    # FIX: the error message below always promised comma-separated string
    # support, but strings were previously rejected - split them first.
    if isinstance(deps, str):
        deps = [dep.strip() for dep in deps.split(",") if dep.strip()]
    if not isinstance(deps, list):
        raise SaltInvocationError(
            "'deps' must be a Python list or comma-separated string"
        )
    for deprpm in deps:
        parsed = urllib.parse.urlparse(deprpm)
        depbase = os.path.basename(deprpm)
        dest = os.path.join(tree_base, depbase)
        if parsed.scheme:
            __salt__["cp.get_url"](deprpm, dest, saltenv=saltenv)
        else:
            shutil.copy(deprpm, dest)

        deps_list += " {}".format(dest)

    return deps_list
def _check_repo_gpg_phrase_utils():
    """
    Check that /usr/libexec/gpg-preset-passphrase is installed
    """
    util_name = "/usr/libexec/gpg-preset-passphrase"
    if not __salt__["file.file_exists"](util_name):
        raise CommandExecutionError(
            "utility '{}' needs to be installed".format(util_name)
        )
    return True
def _get_gpg_key_resources(keyid, env, use_passphrase, gnupghome, runas):
    """
    Obtain gpg key resource infomation to sign repo files with

    keyid
        Optional Key ID to use in signing packages and repository.
        Utilizes Public and Private keys associated with keyid which have
        been loaded into the minion's Pillar data.

    env
        A dictionary of environment variables to be utilized in creating the
        repository.

    use_passphrase : False
        Use a passphrase with the signing key presented in ``keyid``.
        Passphrase is received from Pillar data which could be passed on the
        command line with ``pillar`` parameter.

    gnupghome : /etc/salt/gpgkeys
        Location where GPG related files are stored, used with ``keyid``.

    runas : root
        User to create the repository as, and optionally sign packages.

        .. note::
            Ensure the user has correct permissions to any files and
            directories which are to be utilized.

    Returns:
        tuple
            use_gpg_agent  True | False, Redhat 8 now makes use of a gpg-agent similar ot Debian
            local_keyid    key id to use in signing
            define_gpg_name  string containing definition to use with addsign (use_gpg_agent False)
            phrase         pass phrase (may not be used)
    """
    local_keygrip_to_use = None
    local_key_fingerprint = None
    local_keyid = None
    local_uids = None
    define_gpg_name = ""
    phrase = ""
    retrc = 0
    use_gpg_agent = False

    # RHEL 8+ signs through gpg-agent (similar to Debian) rather than a
    # direct passphrase prompt
    if (
        __grains__.get("os_family") == "RedHat"
        and __grains__.get("osmajorrelease") >= 8
    ):
        use_gpg_agent = True

    if keyid is not None:
        # import_keys
        pub_keyname = __salt__["pillar.get"]("gpg_pkg_pub_keyname", None)
        priv_keyname = __salt__["pillar.get"]("gpg_pkg_priv_keyname", None)
        # FIX: validate the pillar values themselves; the previous code
        # checked the already-formatted path strings, which could never be
        # None, so the guard was dead code.
        if pub_keyname is None or priv_keyname is None:
            raise SaltInvocationError(
                "Pillar data should contain Public and Private keys associated with"
                " 'keyid'"
            )
        pkg_pub_key_file = "{}/{}".format(gnupghome, pub_keyname)
        pkg_priv_key_file = "{}/{}".format(gnupghome, priv_keyname)
        try:
            __salt__["gpg.import_key"](
                user=runas, filename=pkg_pub_key_file, gnupghome=gnupghome
            )
            __salt__["gpg.import_key"](
                user=runas, filename=pkg_priv_key_file, gnupghome=gnupghome
            )

        except SaltInvocationError:
            raise SaltInvocationError(
                "Public and Private key files associated with Pillar data and 'keyid' "
                "{} could not be found".format(keyid)
            )

        # gpg keys should have been loaded as part of setup
        # retrieve specified key and preset passphrase
        local_keys = __salt__["gpg.list_keys"](user=runas, gnupghome=gnupghome)
        for gpg_key in local_keys:
            if keyid == gpg_key["keyid"][8:]:
                local_uids = gpg_key["uids"]
                local_keyid = gpg_key["keyid"]
                if use_gpg_agent:
                    local_keygrip_to_use = gpg_key["fingerprint"]
                    local_key_fingerprint = gpg_key["fingerprint"]
                break

        if use_gpg_agent:
            # the keygrip (not the fingerprint) is what
            # gpg-preset-passphrase expects; scrape it from gpg output
            cmd = "gpg --with-keygrip --list-secret-keys"
            local_keys2_keygrip = __salt__["cmd.run"](cmd, runas=runas, env=env)
            local_keys2 = iter(local_keys2_keygrip.splitlines())
            try:
                for line in local_keys2:
                    if line.startswith("sec"):
                        line_fingerprint = next(local_keys2).lstrip().rstrip()
                        if local_key_fingerprint == line_fingerprint:
                            lkeygrip = next(local_keys2).split("=")
                            local_keygrip_to_use = lkeygrip[1].lstrip().rstrip()
                            break
            except StopIteration:
                raise SaltInvocationError(
                    "unable to find keygrip associated with fingerprint '{}' for keyid"
                    " '{}'".format(local_key_fingerprint, local_keyid)
                )

        if local_keyid is None:
            raise SaltInvocationError(
                "The key ID '{}' was not found in GnuPG keyring at '{}'".format(
                    keyid, gnupghome
                )
            )

        if use_passphrase:
            phrase = __salt__["pillar.get"]("gpg_passphrase")
            if use_gpg_agent:
                _check_repo_gpg_phrase_utils()
                cmd = (
                    "/usr/libexec/gpg-preset-passphrase --verbose --preset "
                    '--passphrase "{}" {}'.format(phrase, local_keygrip_to_use)
                )
                retrc = __salt__["cmd.retcode"](cmd, runas=runas, env=env)
                if retrc != 0:
                    # FIX: the message used '{1}' with a single positional
                    # argument, which raised IndexError while formatting.
                    raise SaltInvocationError(
                        "Failed to preset passphrase, error {}, "
                        "check logs for further details".format(retrc)
                    )

        if local_uids:
            define_gpg_name = (
                "--define='%_signature gpg' --define='%_gpg_name {}'".format(
                    local_uids[0]
                )
            )

        # need to update rpm with public key
        cmd = "rpm --import {}".format(pkg_pub_key_file)
        retrc = __salt__["cmd.retcode"](cmd, runas=runas, use_vt=True)
        if retrc != 0:
            raise SaltInvocationError(
                "Failed to import public key from file {} with return "
                "error {}, check logs for further details".format(
                    pkg_pub_key_file, retrc
                )
            )

    return (use_gpg_agent, local_keyid, define_gpg_name, phrase)
def _sign_file(runas, define_gpg_name, phrase, abs_file, timeout):
    """
    Sign file with provided key and definition

    Drives ``rpm --addsign`` through a pseudo-terminal, answering the
    passphrase prompt with *phrase* and giving up after *timeout* seconds.
    """
    SIGN_PROMPT_RE = re.compile(r"Enter pass phrase: ", re.M)

    # interval of 0.125 is really too fast on some systems
    interval = 0.5
    number_retries = timeout / interval
    times_looped = 0
    error_msg = "Failed to sign file {}".format(abs_file)

    cmd = "rpm {} --addsign {}".format(define_gpg_name, abs_file)
    preexec_fn = functools.partial(salt.utils.user.chugid_and_umask, runas, None)
    # FIX: pre-bind proc so the finally block cannot raise NameError when
    # Terminal() itself fails before the assignment completes.
    proc = None
    try:
        stdout, stderr = None, None
        proc = salt.utils.vt.Terminal(
            cmd,
            shell=True,
            preexec_fn=preexec_fn,
            stream_stdout=True,
            stream_stderr=True,
        )
        while proc.has_unread_data:
            stdout, stderr = proc.recv()
            if stdout and SIGN_PROMPT_RE.search(stdout):
                # have the prompt for inputting the passphrase
                proc.sendline(phrase)
            else:
                times_looped += 1
                if times_looped > number_retries:
                    raise SaltInvocationError(
                        "Attemping to sign file {} failed, timed out after {} seconds".format(
                            abs_file, int(times_looped * interval)
                        )
                    )
            time.sleep(interval)

        proc_exitstatus = proc.exitstatus
        if proc_exitstatus != 0:
            raise SaltInvocationError(
                "Signing file {} failed with proc.status {}".format(
                    abs_file, proc_exitstatus
                )
            )
    except salt.utils.vt.TerminalException as err:
        trace = traceback.format_exc()
        # FIX: the original call passed err/trace as lazy format args to a
        # message with no placeholders, so they were lost by the logger.
        log.error("%s: %s\n%s", error_msg, err, trace)
    finally:
        if proc is not None:
            proc.close(terminate=True, kill=True)
def _sign_files_with_gpg_agent(runas, local_keyid, abs_file, repodir, env, timeout):
    """
    Sign file with provided key utilizing gpg-agent
    """
    cmd = "rpmsign --verbose --key-id={} --addsign {}".format(local_keyid, abs_file)
    retrc = __salt__["cmd.retcode"](cmd, runas=runas, cwd=repodir, use_vt=True, env=env)
    if retrc:
        raise SaltInvocationError(
            "Signing encountered errors for command '{}', "
            "return error {}, check logs for further details".format(cmd, retrc)
        )
def make_src_pkg(
    dest_dir, spec, sources, env=None, template=None, saltenv="base", runas="root"
):
    """
    Create a source rpm from the given spec file and sources

    CLI Example:

    .. code-block:: bash

        salt '*' pkgbuild.make_src_pkg /var/www/html/
                https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
                https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz

    This example command should build the libnacl SOURCE package and place it in
    /var/www/html/ on the minion

    .. versionchanged:: 2017.7.0

    dest_dir
        The directory on the minion to place the built package(s)

    spec
        The location of the spec file (used for rpms)

    sources
        The list of package sources

    env
        A dictionary of environment variables to be set prior to execution.

    template
        Run the spec file through a templating engine
        Optional argument, allows for no templating engine used to be
        if none is desired.

    saltenv
        The saltenv to use for files downloaded from the salt filesever

    runas
        The user to run the build process as

        .. versionadded:: 2018.3.3

    .. note::

        using SHA256 as digest and minimum level dist el6

    """
    # prepare the per-user macros and a fresh rpmbuild directory tree
    _create_rpmmacros(runas)
    tree_base = _mk_tree(runas)
    spec_path = _get_spec(tree_base, spec, template, saltenv)
    __salt__["file.chown"](path=spec_path, user=runas, group="mock")
    __salt__["file.chown"](path=tree_base, user=runas, group="mock")

    # accept a comma-separated string as well as a list of sources
    if isinstance(sources, str):
        sources = sources.split(",")

    for src in sources:
        _get_src(tree_base, src, saltenv, runas)

    # make source rpms for dist el6 with SHA256, usable with mock on other dists
    cmd = 'rpmbuild --verbose --define "_topdir {}" -bs --define "dist .el6" {}'.format(
        tree_base, spec_path
    )
    retrc = __salt__["cmd.retcode"](cmd, runas=runas)
    if retrc != 0:
        raise SaltInvocationError(
            "Make source package for destination directory {}, spec {}, sources {},"
            " failed with return error {}, check logs for further details".format(
                dest_dir, spec, sources, retrc
            )
        )

    # collect the generated .src.rpm files into dest_dir
    srpms = os.path.join(tree_base, "SRPMS")
    ret = []
    if not os.path.isdir(dest_dir):
        __salt__["file.makedirs_perms"](name=dest_dir, user=runas, group="mock")
    for fn_ in os.listdir(srpms):
        full = os.path.join(srpms, fn_)
        tgt = os.path.join(dest_dir, fn_)
        shutil.copy(full, tgt)
        ret.append(tgt)
    return ret
def build(
    runas,
    tgt,
    dest_dir,
    spec,
    sources,
    deps,
    env,
    template,
    saltenv="base",
    log_dir="/var/log/salt/pkgbuild",
):
    """
    Given the package destination directory, the spec file source and package
    sources, use mock to safely build the rpm defined in the spec file

    CLI Example:

    .. code-block:: bash

        salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html
                https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
                https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz

    This example command should build the libnacl package for rhel 7 using user
    mock and place it in /var/www/html/ on the minion

    Returns a dict with 'Source Packages', 'Packages' and 'Log Files' lists.
    """
    ret = {}
    try:
        __salt__["file.chown"](path=dest_dir, user=runas, group="mock")
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    srpm_dir = os.path.join(dest_dir, "SRPMS")
    srpm_build_dir = tempfile.mkdtemp()
    try:
        # first build the source rpm(s) in a throwaway directory
        srpms = make_src_pkg(
            srpm_build_dir, spec, sources, env, template, saltenv, runas
        )
    except Exception as exc:  # pylint: disable=broad-except
        shutil.rmtree(srpm_build_dir)
        log.error("Failed to make src package")
        return ret

    distset = _get_distset(tgt)

    noclean = ""
    deps_dir = tempfile.mkdtemp()
    deps_list = _get_deps(deps, deps_dir, saltenv)

    # retcodes of all mock invocations are OR-ed together; any non-zero
    # result triggers the SaltInvocationError at the end
    retrc = 0
    for srpm in srpms:
        dbase = os.path.dirname(srpm)
        results_dir = tempfile.mkdtemp()
        try:
            __salt__["file.chown"](path=dbase, user=runas, group="mock")
            __salt__["file.chown"](path=results_dir, user=runas, group="mock")
            cmd = "mock --root={} --resultdir={} --init".format(tgt, results_dir)
            retrc |= __salt__["cmd.retcode"](cmd, runas=runas)
            if deps_list and not deps_list.isspace():
                # install the build dependencies, then keep the chroot
                # around (--no-clean) for the actual build
                cmd = "mock --root={} --resultdir={} --install {} {}".format(
                    tgt, results_dir, deps_list, noclean
                )
                retrc |= __salt__["cmd.retcode"](cmd, runas=runas)
                noclean += " --no-clean"

            cmd = "mock --root={} --resultdir={} {} {} {}".format(
                tgt, results_dir, distset, noclean, srpm
            )
            retrc |= __salt__["cmd.retcode"](cmd, runas=runas)
            # derive the per-package log directory name from the srpm metadata
            cmdlist = [
                "rpm",
                "-qp",
                "--queryformat",
                "{0}/%{{name}}/%{{version}}-%{{release}}".format(log_dir),
                srpm,
            ]
            log_dest = __salt__["cmd.run_stdout"](cmdlist, python_shell=False)
            for filename in os.listdir(results_dir):
                full = os.path.join(results_dir, filename)
                if filename.endswith("src.rpm"):
                    sdest = os.path.join(srpm_dir, filename)
                    try:
                        __salt__["file.makedirs_perms"](
                            name=srpm_dir, user=runas, group="mock"
                        )
                    except OSError as exc:
                        if exc.errno != errno.EEXIST:
                            raise
                    shutil.copy(full, sdest)
                    ret.setdefault("Source Packages", []).append(sdest)
                elif filename.endswith(".rpm"):
                    bdist = os.path.join(dest_dir, filename)
                    shutil.copy(full, bdist)
                    ret.setdefault("Packages", []).append(bdist)
                else:
                    # everything that is not an rpm is a build log
                    log_file = os.path.join(log_dest, filename)
                    try:
                        __salt__["file.makedirs_perms"](
                            name=log_dest, user=runas, group="mock"
                        )
                    except OSError as exc:
                        if exc.errno != errno.EEXIST:
                            raise
                    shutil.copy(full, log_file)
                    ret.setdefault("Log Files", []).append(log_file)
        except Exception as exc:  # pylint: disable=broad-except
            log.error("Error building from %s: %s", srpm, exc)
        finally:
            shutil.rmtree(results_dir)
    if retrc != 0:
        raise SaltInvocationError(
            "Building packages for destination directory {}, spec {}, sources {},"
            " failed with return error {}, check logs for further details".format(
                dest_dir, spec, sources, retrc
            )
        )
    shutil.rmtree(deps_dir)
    shutil.rmtree(srpm_build_dir)
    return ret
def make_repo(
    repodir,
    keyid=None,
    env=None,
    use_passphrase=False,
    gnupghome="/etc/salt/gpgkeys",
    runas="root",
    timeout=15.0,
):
    """
    Make a package repository and optionally sign packages present

    Given the repodir, create a ``yum`` repository out of the rpms therein
    and optionally sign it and packages present, the name is directory to
    turn into a repo. This state is best used with onchanges linked to
    your package building states.

    repodir
        The directory to find packages that will be in the repository.

    keyid
        .. versionchanged:: 2016.3.0

        Optional Key ID to use in signing packages and repository.
        Utilizes Public and Private keys associated with keyid which have
        been loaded into the minion's Pillar data.

        For example, contents from a Pillar data file with named Public
        and Private keys as follows:

        .. code-block:: yaml

            gpg_pkg_priv_key: |
              -----BEGIN PGP PRIVATE KEY BLOCK-----
              Version: GnuPG v1

              lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
              w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
              .
              .
              Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn
              R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX
              =JvW8
              -----END PGP PRIVATE KEY BLOCK-----

            gpg_pkg_priv_keyname: gpg_pkg_key.pem

            gpg_pkg_pub_key: |
              -----BEGIN PGP PUBLIC KEY BLOCK-----
              Version: GnuPG v1

              mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
              w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
              .
              .
              bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP
              4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki
              inNqW9c=
              =s1CX
              -----END PGP PUBLIC KEY BLOCK-----

            gpg_pkg_pub_keyname: gpg_pkg_key.pub

    env
        .. versionchanged:: 2016.3.0

        A dictionary of environment variables to be utilized in creating the
        repository.

        .. note::

            This parameter is not used for making ``yum`` repositories.

    use_passphrase : False
        .. versionadded:: 2016.3.0

        Use a passphrase with the signing key presented in ``keyid``.
        Passphrase is received from Pillar data which could be passed on the
        command line with ``pillar`` parameter.

        .. code-block:: bash

            pillar='{ "gpg_passphrase" : "my_passphrase" }'

        .. versionadded:: 3001.1

        RHEL 8 and above leverages gpg-agent and gpg-preset-passphrase for
        caching keys, etc.

    gnupghome : /etc/salt/gpgkeys
        .. versionadded:: 2016.3.0

        Location where GPG related files are stored, used with ``keyid``.

    runas : root
        .. versionadded:: 2016.3.0

        User to create the repository as, and optionally sign packages.

        .. note::

            Ensure the user has correct permissions to any files and
            directories which are to be utilized.

    timeout : 15.0
        .. versionadded:: 2016.3.4

        Timeout in seconds to wait for the prompt for inputting the passphrase.

    CLI Example:

    .. code-block:: bash

        salt '*' pkgbuild.make_repo /var/www/html/
    """
    # make sure the runas user has a .rpmmacros set up for signing
    home = os.path.expanduser("~" + runas)
    rpmmacros = os.path.join(home, ".rpmmacros")
    if not os.path.exists(rpmmacros):
        _create_rpmmacros(runas)

    # only seed GNUPGHOME into a fresh env; a caller-supplied env is kept as-is
    if gnupghome and env is None:
        env = {}
        env["GNUPGHOME"] = gnupghome

    use_gpg_agent, local_keyid, define_gpg_name, phrase = _get_gpg_key_resources(
        keyid, env, use_passphrase, gnupghome, runas
    )

    # sign_it_here
    for fileused in os.listdir(repodir):
        if fileused.endswith(".rpm"):
            abs_file = os.path.join(repodir, fileused)
            if use_gpg_agent:
                _sign_files_with_gpg_agent(
                    runas, local_keyid, abs_file, repodir, env, timeout
                )
            else:
                _sign_file(runas, define_gpg_name, phrase, abs_file, timeout)

    # (re)generate the yum repository metadata and return the cmd.run_all dict
    cmd = "createrepo --update {}".format(repodir)
    retrc = __salt__["cmd.run_all"](cmd, runas=runas)
    return retrc
| {
"content_hash": "4ecfc2dc9767bcac0279965d99c8ec9f",
"timestamp": "",
"source": "github",
"line_count": 758,
"max_line_length": 106,
"avg_line_length": 33.138522427440634,
"alnum_prop": 0.5723157769019467,
"repo_name": "saltstack/salt",
"id": "1532817222ff2b28aa49abf860f085fd4de908e4",
"size": "25119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/modules/rpmbuild_pkgbuild.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
"""Simple Flask application served by pulsar WSGI server on a pool of threads
"""
from flask import Flask, make_response
from pulsar.apps import wsgi
def FlaskApp():
    """Build the demo Flask application: one GET index route plus a 404 handler."""
    app = Flask(__name__)

    def not_found(e):
        # Plain-text body with the matching 404 status code.
        return make_response("404 Page", 404)

    def add_org():
        return "Flask Example"

    # Non-decorator equivalents of @app.errorhandler / @app.route.
    app.register_error_handler(404, not_found)
    app.add_url_rule('/', 'add_org', add_org, methods=['GET'])
    return app
class Site(wsgi.LazyWsgi):
    """Lazily construct the WSGI handler that runs the Flask app in an executor."""

    def setup(self, environ=None):
        # Build the Flask app on first use and wrap it so the (synchronous)
        # Flask code runs on pulsar's thread pool.
        flask_app = FlaskApp()
        middleware = (wsgi.wait_for_body_middleware,
                      wsgi.middleware_in_executor(flask_app))
        return wsgi.WsgiHandler(middleware)
def server(**kwargs):
    """Create a pulsar WSGIServer serving the Flask site; kwargs pass through."""
    site = Site()
    return wsgi.WSGIServer(site, **kwargs)
if __name__ == '__main__':  # pragma nocover
    # Start the pulsar WSGI server when executed as a script.
    server().start()
| {
"content_hash": "a734d31f00daed86949e261a348d1077",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 77,
"avg_line_length": 21.62857142857143,
"alnum_prop": 0.607661822985469,
"repo_name": "quantmind/pulsar",
"id": "d6d9ad4108c1df1d4bdf6af943df917d69f3be15",
"size": "757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/flaskapp/manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "C",
"bytes": "1366"
},
{
"name": "CSS",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "1085"
},
{
"name": "JavaScript",
"bytes": "116"
},
{
"name": "Makefile",
"bytes": "2272"
},
{
"name": "Python",
"bytes": "1140291"
},
{
"name": "Shell",
"bytes": "2164"
}
],
"symlink_target": ""
} |
import base64
import mimetypes
from urllib import urlencode
from urlparse import urlparse, urlunparse
def get_content_type(filename):
    """Guess the MIME type for *filename*; fall back to octet-stream."""
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed or 'application/octet-stream'
def _encode_params(data):
    """Encode parameters in a piece of data.

    Will successfully encode parameters when passed as a dict or a list of
    2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
    if parameters are supplied as a dict.

    Strings and file-like objects are returned unchanged; None values are
    dropped; list values expand into repeated keys (urlencode doseq).
    """
    if isinstance(data, basestring):
        return data
    elif hasattr(data, 'read'):
        return data
    elif hasattr(data, '__iter__'):
        result = []
        # Fix: honor the documented list-of-2-tuples input -- the previous
        # code unconditionally called .iteritems(), which crashed on lists.
        items = data.iteritems() if hasattr(data, 'iteritems') else data
        for k, vs in items:
            # Fix: proper conditional instead of the "cond and a or b" idiom.
            for v in (vs if isinstance(vs, list) else [vs]):
                if v is not None:
                    result.append(
                        (k.encode('utf-8') if isinstance(k, unicode) else k,
                         v.encode('utf-8') if isinstance(v, unicode) else v))
        return urlencode(result, doseq=True)
    else:
        return data
def _utf8(key):
    """Coerce *key* to a UTF-8 byte string (Python 2 semantics)."""
    if not isinstance(key, basestring):
        key = str(key)
    if isinstance(key, unicode):
        key = key.encode('utf-8')
    return key
def _encode_multipart_formdata(fields, files):
    """
    Build a multipart/form-data request body.

    fields is a dict mapping form-field name -> value for regular form fields.
    files is a dict mapping field name -> (filename, value) for data to be
    uploaded as files; *value* may be a string or a file-like object with read().
    Return (content_type, body) ready for an httplib.HTTP instance.
    """
    # Fixed boundary string; assumes it never occurs in the payload itself.
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    L = []
    for key, value in fields.iteritems():
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"' % _utf8(key))
        L.append('')
        L.append(_utf8(value))
    for key, (filename, value) in files.iteritems():
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (_utf8(key), _utf8(filename)))
        L.append('Content-Type: %s' % get_content_type(filename))
        L.append('')
        # File-like objects are consumed here; strings are UTF-8 encoded.
        L.append(value.read() if hasattr(value, "read") else _utf8(value))
    # Closing boundary, then a trailing CRLF.
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body
def _build_url(url, _params):
    """Build the actual URL to use, merging *_params* into the query string."""
    # Support for unicode domain names and paths.
    scheme, netloc, path, params, query, fragment = urlparse(url)
    netloc = netloc.encode('idna').decode('utf-8')
    if not path:
        path = '/'

    # Down-convert any unicode component to a UTF-8 byte string.
    pieces = []
    for piece in (scheme, netloc, path, params, query, fragment):
        if isinstance(piece, unicode):
            piece = piece.encode('utf-8')
        pieces.append(piece)
    scheme, netloc, path, params, query, fragment = pieces

    extra = _encode_params(_params)
    if extra:
        query = '%s&%s' % (query, extra) if query else extra

    return urlunparse([scheme, netloc, path, params, query, fragment])
def quote_chinese(url, encodeing="utf-8"):
    """Percent-encode every non-ASCII byte of *url* (Python 2 byte string)."""
    if isinstance(url, unicode):
        # Normalise unicode input to UTF-8 bytes first.
        return quote_chinese(url.encode("utf-8"))
    encoded = []
    for ch in url:
        encoded.append(ch if ord(ch) < 128 else '%%%02X' % ord(ch))
    return "".join(encoded)
def xunlei_url_decode(url):
    """Decode a thunder:// link into the plain URL it wraps."""
    payload = url.split('&')[0]
    # Strip the "thunder://" scheme, then base64-decode the remainder.
    decoded = payload[10:].decode('base64')
    assert decoded.startswith('AA') and decoded.endswith('ZZ'), 'xunlei url format error'
    # Drop the "AA" prefix and "ZZ" suffix framing.
    return decoded[2:-2]
def flashget_url_decode(url):
    """Decode a flashget:// link into the plain URL it wraps."""
    payload = url.split('&')[0]
    # Strip the "flashget://" scheme, then base64-decode the remainder.
    decoded = payload[11:].decode('base64')
    assert decoded.startswith('[FLASHGET]') and decoded.endswith('[FLASHGET]'), 'flashget url format error'
    # Drop the "[FLASHGET]" framing on both ends.
    return decoded[10:-10]
def flashgetx_url_decode(url):
    """Rebuild an ed2k:// link from a flashgetx:// one."""
    payload = url.split('&')[0]
    # Fields after the first two '|'-separated tokens: name (base64), size, hash, end.
    name, size, hash, end = payload.split('|')[2:]
    assert end == '/', 'flashgetx url format error'
    return '|'.join(['ed2k://', 'file', name.decode('base64'), size, hash + '/'])
def qqdl_url_decode(url):
    """Decode a qqdl:// link: base64 payload after the 7-char scheme prefix."""
    payload = url.split('&')[0]
    return base64.decodestring(payload[7:])
def url_unmask(url):
    """Unwrap download-manager URL schemes, then percent-encode non-ASCII bytes."""
    # Prefix -> decoder dispatch; prefixes are mutually exclusive, so the
    # first (and only) match wins.
    decoders = (
        ('thunder://', xunlei_url_decode),
        ('flashget://', flashget_url_decode),
        ('flashgetx://', flashgetx_url_decode),
        ('qqdl://', qqdl_url_decode),
    )
    lowered = url.lower()
    for prefix, decode in decoders:
        if lowered.startswith(prefix):
            url = decode(url)
            break
    return quote_chinese(url)
if __name__ == "__main__":
    # Smoke tests, run only when this module is executed directly (Python 2).
    assert _build_url("http://httpbin.org", {'id': 123}) == "http://httpbin.org/?id=123"
    assert _build_url("http://httpbin.org/get", {'id': 123}) == "http://httpbin.org/get?id=123"
    # NOTE(review): this expected ordering relies on CPython 2's dict hashing
    # placing 'foo' before 'id' -- confirm if the interpreter changes.
    assert _encode_params({'id': 123, 'foo': 'fdsa'}) == "foo=fdsa&id=123"
    assert _encode_params({'id': "中文"}) == "id=%E4%B8%AD%E6%96%87"
    print _encode_multipart_formdata({'id': 123}, {'key': ('file.name', 'content')})
| {
"content_hash": "b2ffdb54dfed9ac57ca14f8b4b8b9242",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 108,
"avg_line_length": 34.285714285714285,
"alnum_prop": 0.5823863636363636,
"repo_name": "alexknight/jobscrawler",
"id": "3e2c6259a31d25c131fe6e9f3d8368d1aa96f87a",
"size": "5311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/url.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15007"
}
],
"symlink_target": ""
} |
'''
Config holds the various processing parameters for each of the
individual packages
''' | {
"content_hash": "9772a4e59ea491c0e84b73842010be70",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 62,
"avg_line_length": 22.5,
"alnum_prop": 0.7888888888888889,
"repo_name": "dabillox/kcl-fire-aot",
"id": "593bca0cbbf5f63d4b5e35f7db94cad7c3b15b38",
"size": "90",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/config/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2923893"
},
{
"name": "Makefile",
"bytes": "1204"
},
{
"name": "Perl",
"bytes": "6697"
},
{
"name": "Python",
"bytes": "318547"
}
],
"symlink_target": ""
} |
import re, time
import urllib, urllib2
from bs4 import BeautifulSoup
from bs4 import UnicodeDammit
from util.functional import try_times
from Queue import Queue
import threading
from dao.dbSUBMIT import Submit
from dao.dbBase import db
import datetime
| {
"content_hash": "5709773b1505e34a9de6b17a889dff39",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 37,
"avg_line_length": 19.615384615384617,
"alnum_prop": 0.8313725490196079,
"repo_name": "Z2Y/CUIT-ACM-Spider",
"id": "325543013ee0b3c119c2ef273ffbf1c87d8a092b",
"size": "255",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spider/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57437"
}
],
"symlink_target": ""
} |
__copyright__ = 'Copyright(c) Gordon Elliott 2017'
"""
"""
import logging
from datetime import datetime
from glod.db.person import Person
from glod.db.organisation import Organisation, OrganisationCategory, OrganisationStatus
from glod.model.communication_permission import CommunicationPermission
from glod.db.organisation_address import OrganisationAddress
LOG = logging.getLogger(__file__)
INITIAL_GDPR_SURVEY = datetime(2018, 10, 30)
TRUE_STRINGS = ("true", "True", "TRUE", "yes", "Yes", "YES", "1")
IS_PRIMARY = 'primary'
def _reorganise_parishioner(parishioner, address_map, household_map):
    """Convert one legacy parishioner record into Organisation/Person entities.

    Reuses (and updates) *household_map* so parishioners sharing a household
    reference number share one Organisation. Returns the list of entities
    created for this record.
    """
    created = []

    # Map the legacy free-text status onto organisation category + status.
    status_text = parishioner.status.lower()
    if status_text == 'foreign list':
        org_status = OrganisationStatus.Active
        org_category = OrganisationCategory.NonLocalHousehold
    else:
        if status_text == 'active':
            org_status = OrganisationStatus.Active
        else:
            org_status = OrganisationStatus.Inactive
        org_category = OrganisationCategory.Household

    ref_no = parishioner.household_ref_no
    if ref_no in household_map:
        household = household_map[ref_no]
    else:
        # First member of this household: create the organisation and link
        # it to its address.
        household = Organisation(
            parishioner.surname,
            org_category,
            org_status,
            ref_no,
        )
        link = OrganisationAddress(household, address_map[ref_no])
        household_map[ref_no] = household
        created = [household, link]

    member = Person(
        household,
        parishioner.surname,
        parishioner.first_name,
        title=parishioner.title,
        mobile=parishioner.mobile,
        other_phone=parishioner.other,
        email=parishioner.email,
        parishioner_reference_no=parishioner.reference_no,
    )

    permissions = CommunicationPermission(
        member,
        parishioner.main_contact == IS_PRIMARY,
        INITIAL_GDPR_SURVEY,
        parishioner.by_email in TRUE_STRINGS,
        parishioner.by_phone in TRUE_STRINGS,
        parishioner.by_post in TRUE_STRINGS,
        parishioner.news in TRUE_STRINGS,
        parishioner.finance in TRUE_STRINGS,
    )

    created += [member, permissions]
    return created
def reorganise_parishioners(session, parishioners, address_map):
    """Convert all legacy parishioner rows and add the resulting entities to *session*."""
    households = {}
    for record in parishioners:
        session.add_all(_reorganise_parishioner(record, address_map, households))
| {
"content_hash": "d793adb902102289ce6c4eeb563c550c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 122,
"avg_line_length": 33.81818181818182,
"alnum_prop": 0.695084485407066,
"repo_name": "gordon-elliott/glod",
"id": "0feb73925a0d17589a3e6cfa1237061e11c7d7d3",
"size": "2604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/glod/in_out/organisation.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "477"
},
{
"name": "JavaScript",
"bytes": "84666"
},
{
"name": "Mako",
"bytes": "546"
},
{
"name": "Python",
"bytes": "2641538"
}
],
"symlink_target": ""
} |
'''
Created on 3 Aug 2014
@author: ankur
'''
import logging
from ondelabs.copilot.dao.DiskDAO import DiskDAO
from ondelabs.copilot.model.TextClassType import TextClassType
from ondelabs.copilot.model.ValidationData import ValidationData
class DiskValidationDAO(DiskDAO):
    """Loads labelled validation examples from a file of "<text> <class-id>" lines."""

    def __init__(self, filename):
        super(DiskValidationDAO, self).__init__(filename)

    def loadData(self):
        """Read every line and return a ValidationData of line -> TextClassType.

        Lines that do not end in a valid integer class id are skipped with a
        warning.
        """
        result = {}
        lines = self._getLinesFromFile()
        for line in lines:
            textClassType = self.__separateTextAndClassType(line)
            if textClassType is not None:
                result[line] = textClassType
        return ValidationData(result)

    def __separateTextAndClassType(self, line):
        """Split "<text> <class-id>" at the last space; return None when malformed."""
        index = line.rfind(' ')
        # Fix: compare with == / not, rather than identity ("is -1" /
        # "is False"), which only worked by CPython implementation accident.
        if index == -1:
            logging.warning('Could not find valid class type in ' + line)
            return None
        if not self._isInt(line[index:]):
            logging.warning('classType is not an int')
            return None
        return TextClassType(self._removeNoiseFromText(line[:index]), int(line[index:]))

    def _removeNoiseFromText(self, text):
        # Override so that the text can be constructed in
        # a more preferable way e.g. remove new lines;
        # remove symbols; all lower case.
        return text
| {
"content_hash": "7afcd8dd7ce98eecc4b2e24501c38d1b",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 96,
"avg_line_length": 29.934782608695652,
"alnum_prop": 0.616557734204793,
"repo_name": "ankur22/Butler",
"id": "4c04939f2e2b493025c3eeadcad18ceeaf1fe3b9",
"size": "1377",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ondelabs/copilot/dao/DiskValidationDAO.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124701"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class InventoryAdjustExternalRequest(RestApi):
    """Request object for the taobao.inventory.adjust.external TOP API."""

    # Request parameters; all default to None until set by the caller.
    _FIELDS = ('biz_type', 'biz_unique_code', 'items', 'occupy_operate_code',
               'operate_time', 'operate_type', 'reduce_type', 'store_code')

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        for field in self._FIELDS:
            setattr(self, field, None)

    def getapiname(self):
        return 'taobao.inventory.adjust.external'
| {
"content_hash": "b85df5379f331949a02c1a6ed70df439",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 29.055555555555557,
"alnum_prop": 0.6921606118546845,
"repo_name": "CooperLuan/devops.notes",
"id": "068f704eb194ea1c7b4741467d6fa9ac9f790911",
"size": "523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taobao/top/api/rest/InventoryAdjustExternalRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1505"
},
{
"name": "JavaScript",
"bytes": "29"
},
{
"name": "Python",
"bytes": "211546"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
"""
Launch a Docker image with Ubuntu and LXDE window manager, and
automatically open up the URL in the default web browser.
"""
# Author: Xiangmin Jiao <xmjiao@gmail.com>
from __future__ import print_function # Only Python 2.x
import sys
import subprocess
import time
APP = "docker"
def parse_args(description):
    """Parse command-line arguments and return the argparse namespace.

    *description* is the help text shown by ``-h``. The returned namespace's
    ``image`` gets ``:<tag>`` appended when ``--tag`` was given and the image
    name does not already carry a tag.
    """
    import argparse

    # Process command-line arguments
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-i', '--image',
                        help='The Docker image to use. ' +
                        'The default is x11vnc/docker-desktop.',
                        default="x11vnc/docker-desktop")
    parser.add_argument('-t', '--tag',
                        help='Tag of the image. The default is latest. ' +
                        'If the image already has a tag, its tag prevails.',
                        default="")
    parser.add_argument('-v', '--volume',
                        help='A data volume to be mounted to ~/project.',
                        default=APP + "-project")
    parser.add_argument('-p', '--pull',
                        help='Pull the latest Docker image. ' +
                        'The default is not to pull.',
                        action='store_true',
                        default=False)
    parser.add_argument('-r', '--reset',
                        help='Reset configurations to default.',
                        action='store_true',
                        default=False)
    parser.add_argument('-d', '--detach',
                        help='Run in background and print container id',
                        action='store_true',
                        default=False)
    parser.add_argument('-s', '--size',
                        help='Size of the screen. The default is to use ' +
                        'the current screen size.',
                        default="")
    parser.add_argument('-A', '--audio',
                        help='Mount the sound device ' +
                        '(Linux only, experimental, sudo required).',
                        default="")
    parser.add_argument('-V', '--nvidia',
                        help='Mount the Nvidia card for GPU computatio. ' +
                        '(Linux only, experimental, sudo required).',
                        default="")
    parser.add_argument('-n', '--no-browser',
                        help='Do not start web browser',
                        action='store_true',
                        default=False)
    parser.add_argument('-a', '--args',
                        help='All the arguments after -a will be passed to the ' +
                        '"docker run" command. Useful for specifying ' +
                        'resources and environment variables.',
                        nargs=argparse.REMAINDER,
                        default=[])

    args = parser.parse_args()

    # Fix: replaced the inverted "if not args.tag: pass / else:" no-op branch.
    # Append the tag only when the image has none and a tag was requested.
    if args.image.find(':') < 0 and args.tag:
        args.image += ':' + args.tag

    return args
def random_ports(port, n):
    """Generate a list of n random ports near the given port.

    The first 5 ports will be sequential, and the remaining n-5 will be
    randomly selected in the range [port-2*n, port+2*n] (clamped to >= 1).
    """
    import random
    for offset in range(min(5, n)):
        yield port + offset
    span = 2 * n
    for _ in range(n - 5):
        yield max(1, port + random.randint(-span, span))
def id_generator(size=6):
    """Generate a container name of the form '<APP>-<size lowercase letters>'."""
    import random
    import string
    letters = string.ascii_lowercase
    suffix = ''.join(random.choice(letters) for _ in range(size))
    return APP + "-" + suffix
def find_free_port(port, retries):
    """Find a free local TCP port near *port*, or exit(-1) after the retries."""
    import socket
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    for candidate in random_ports(port, retries + 1):
        try:
            probe.bind(("127.0.0.1", candidate))
        except socket.error:
            # Port taken; try the next candidate with the same socket.
            continue
        probe.close()
        return candidate
    sys.stderr.write("Error: Could not find a free port.\n")
    sys.exit(-1)
def wait_net_service(port, timeout=30):
    """Wait for a local TCP service to accept connections on *port*.

    Polls every 0.1s for up to *timeout* seconds; on success sleeps a further
    3s (to let the service settle) and returns True. Returns None on timeout.
    """
    import socket
    for _ in range(timeout * 10):
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            probe.connect(("127.0.0.1", port))
        except socket.error:
            probe.close()
            time.sleep(0.1)
        else:
            probe.close()
            time.sleep(3)
            return True
def get_screen_resolution():
    """Return the local screen resolution as "WIDTHxHEIGHT", or "" if unknown.

    Returns "" when tkinter is unavailable or no display can be opened
    (e.g. headless sessions).
    """
    try:
        if sys.version_info.major > 2:
            import tkinter as tk
        else:
            import Tkinter as tk
        root = tk.Tk()
        root.withdraw()
        width, height = root.winfo_screenwidth(), root.winfo_screenheight()
        return str(width) + 'x' + str(height)
    except Exception:
        # Fix: narrowed the bare "except:" so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any import/display failure still yields "".
        return ""
def handle_interrupt(container):
    """Handle keyboard interrupt.

    Gives the user a 5-second window to press Ctrl-C a second time. A second
    interrupt kills the container's init process and exits; otherwise the
    caller's wait loop resumes.
    """
    try:
        print("Press Ctrl-C again to stop the server: ")
        time.sleep(5)
        print('Invalid response. Resuming...')
    except KeyboardInterrupt:
        print('*** Stopping the server.')
        # Terminate the container's my_init so the container shuts down.
        subprocess.Popen(["docker", "exec", container,
                          "killall", "my_init"],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sys.exit(0)
if __name__ == "__main__":
    import os
    import webbrowser
    import platform
    import glob

    args = parse_args(description=__doc__)
    pwd = os.getcwd()
    homedir = os.path.expanduser('~')

    # Sanity checks: on Linux the user must be in the docker group and not root.
    if platform.system() == "Linux":
        if subprocess.check_output(['groups']).find(b'docker') < 0:
            print('You are not a member of the docker group. Please add')
            print('yourself to the docker group using the following command:')
            print('    sudo addgroup $USER docker')
            print('Then, log out and log back in before you can use Docker.')
            sys.exit(-1)
        uid = str(os.getuid())
        if uid == '0':
            print('You are running as root. This is not safe. ' +
                  'Please run as a regular user.')
            sys.exit(-1)
    else:
        # On macOS/Windows the container does not need the host uid.
        uid = ""

    # Check whether the image is already present locally.
    try:
        img = subprocess.check_output(['docker', 'images', '-q', args.image])
    except:
        sys.stderr.write("Docker failed. Please make sure docker was properly " +
                         "installed and has been started.\n")
        sys.exit(-1)

    # Pull when requested or when the image is missing.
    if args.pull or not img:
        try:
            err = subprocess.call(["docker", "pull", args.image])
        except BaseException:
            err = -1

        if err:
            sys.exit(err)

        # Delete dangling image
        if img and subprocess.check_output(['docker', 'images', '-f',
                                            'dangling=true',
                                            '-q']).find(img) >= 0:
            subprocess.Popen(["docker", "rmi", "-f", img.decode('utf-8')[:-1]])

    # Create directory .ssh if not exist
    if not os.path.exists(homedir + "/.ssh"):
        os.mkdir(homedir + "/.ssh")

    user = "ubuntu"
    docker_home = "/home/ubuntu"

    # --reset: wipe the persisted per-tag configuration volume.
    if args.reset:
        try:
            output = subprocess.check_output(["docker", "volume", "rm", "-f",
                                              APP + args.tag + "_config"])
        except subprocess.CalledProcessError as e:
            sys.stderr.write(e.output.decode('utf-8'))

    # Bind mounts: docker socket, current dir, config volume, and ~/.ssh.
    volumes = ["-v", "/var/run/docker.sock:/var/run/docker.sock",
               "-v", pwd + ":" + docker_home + "/shared",
               "-v", APP + args.tag + "_config:" + docker_home + "/.config",
               "-v", homedir + "/.ssh" + ":" + docker_home + "/.ssh"]

    if os.path.exists(homedir + "/.gnupg"):
        volumes += ["-v", homedir + "/.gnupg" +
                    ":" + docker_home + "/.gnupg"]

    # Mount .gitconfig to Docker image
    if os.path.isfile(homedir + "/.gitconfig"):
        volumes += ["-v", homedir + "/.gitconfig" +
                    ":" + docker_home + "/.gitconfig_host"]

    # Working directory: the project data volume if given, else the shared dir.
    if args.volume:
        volumes += ["-v", args.volume + ":" + docker_home + "/project",
                    "-w", docker_home + "/project"]
    else:
        volumes += ["-w", docker_home + "/shared"]

    sys.stderr.write("Starting up docker image...\n")
    # Docker 1.x does not support --rm together with -d; fall back to -t.
    if subprocess.check_output(["docker", "--version"]). \
            find(b"Docker version 1.") >= 0:
        rmflag = "-t"
    else:
        rmflag = "--rm"

    # Determine size of the desktop
    if not args.size:
        size = get_screen_resolution()
        if not size:
            # Set default size and disable webbrowser
            size = "1440x900"
            args.no_browser = True
    else:
        size = args.size

    # Generate a container ID
    container = id_generator()

    envs = ["--hostname", container,
            "--env", "RESOLUT=" + size,
            "--env", "HOST_UID=" + uid]

    # Optional device passthrough (sound card, Nvidia GPUs).
    devices = []
    if args.audio and os.path.exists('/dev/snd'):
        devices += ["--device", "/dev/snd"]
    if args.nvidia:
        for d in glob.glob('/dev/nvidia*'):
            devices += ['--device', d + ':' + d]

    # Start the docker image in the background and pipe the stderr
    port_http = str(find_free_port(6080, 50))
    port_vnc = str(find_free_port(5950, 50))

    subprocess.call(["docker", "run", "-d", rmflag, "--name", container,
                     "-p", "127.0.0.1:" + port_http + ":6080",
                     "-p", "127.0.0.1:" + port_vnc + ":5900"] +
                    envs + volumes + devices + args.args +
                    ['--security-opt', 'seccomp=unconfined',
                     args.image, "startvnc.sh >> " +
                     docker_home + "/.log/vnc.log"])

    wait_for_url = True

    # Wait for user to press Ctrl-C
    while True:
        try:
            if wait_for_url:
                # Wait until the file is not empty
                while not subprocess.check_output(["docker", "exec", container,
                                                   "cat", docker_home +
                                                   "/.log/vnc.log"]):
                    time.sleep(1)

                # Follow the VNC log to capture the noVNC URL when it appears.
                p = subprocess.Popen(["docker", "exec", container,
                                      "tail", "-F",
                                      docker_home + "/.log/vnc.log"],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     universal_newlines=True)

                # Monitor the stdout to extract the URL
                for stdout_line in iter(p.stdout.readline, ""):
                    ind = stdout_line.find("http://localhost:")
                    if ind >= 0:
                        # Open browser if found URL
                        # Rewrite the in-container port 6080 to the host port.
                        url = stdout_line.replace(":6080/",
                                                  ':' + port_http + "/")
                        sys.stdout.write(url)

                        passwd = stdout_line[url.find('password=') + 9:]
                        sys.stdout.write("\nFor a better experience, use VNC Viewer (" +
                                         'http://realvnc.com/download/viewer)\n' +
                                         "to connect to localhost:%s with password %s\n" %
                                         (port_vnc, passwd))

                        if not args.no_browser:
                            wait_net_service(int(port_http))
                            webbrowser.open(url[ind:-1])

                        p.stdout.close()
                        p.terminate()
                        wait_for_url = False
                        break
                    else:
                        sys.stdout.write(stdout_line)

                if args.detach:
                    print('Started container ' + container + ' in background.')
                    print('To stop it, use "docker stop ' + container + '".')
                    sys.exit(0)

                print("Press Ctrl-C to stop the server.")

            # Wait till the container exits or Ctlr-C is pressed
            subprocess.check_output(["docker", "exec", container,
                                     "tail", "-f", "/dev/null"])
        except subprocess.CalledProcessError:
            try:
                # If Docker process no long exists, exit
                if not subprocess.check_output(['docker', 'ps',
                                                '-q', '-f',
                                                'name=' + container]):
                    sys.stderr.write('Docker container ' +
                                     container + ' is no longer running\n')
                    sys.exit(-1)
                else:
                    time.sleep(1)
                    continue
            except subprocess.CalledProcessError:
                sys.stderr.write('Docker container ' +
                                 container + ' is no longer running\n')
                sys.exit(-1)
            except KeyboardInterrupt:
                handle_interrupt(container)

            continue
        except KeyboardInterrupt:
            handle_interrupt(container)
        except OSError:
            sys.exit(-1)
| {
"content_hash": "15197cddae78d0620fe41cee8b078865",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 90,
"avg_line_length": 34.52577319587629,
"alnum_prop": 0.4794714840250821,
"repo_name": "x11vnc/docker-desktop",
"id": "26be5cf07d8e8e63d9434f132a4a0c130399d4b5",
"size": "13419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker_desktop.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1538"
},
{
"name": "Python",
"bytes": "13419"
},
{
"name": "Shell",
"bytes": "297"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.